/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
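
/*
 * PTE bits which adjust_pte() removes from additional shared mappings
 * of a page.  Initially only L_PTE_CACHEABLE; check_writebuffer_bugs()
 * adds L_PTE_BUFFERABLE at boot if the write buffer also aliases
 * physical addresses.
 */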
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, entry;
        int ret = 0;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        pte = pte_offset_map(pmd, address);
        entry = *pte;

        /*
         * If this page isn't present, or is already set up to
         * fault (ie, is old), we can safely ignore any issues.
         */
        if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
                flush_cache_page(vma, address);
                pte_val(entry) &= ~shared_pte_mask;
                set_pte(pte, entry);
                flush_tlb_page(vma, address);
                ret = 1;
        }
        pte_unmap(pte);
        return ret;
}
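
/*
 * Flush the kernel mapping of this page.  With a VIVT cache the data
 * may also sit in cache lines belonging to user-space mappings, so we
 * flush every shared mapping of the page in the current MM as well.
 */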
void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt = NULL;
        struct prio_tree_iter iter;
        unsigned long offset;
        pgoff_t pgoff;

        __cpuc_flush_dcache_page(page_address(page));

        if (!mapping)
                return;

        /*
         * With a VIVT cache, we need to also write back
         * and invalidate any user data.
         */
        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        flush_dcache_mmap_lock(mapping);
        while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
                                          &iter, pgoff, pgoff)) != NULL) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset);
        }
        flush_dcache_mmap_unlock(mapping);
}
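
/*
 * Make all mappings of this page coherent: mark every other shared
 * mapping of the page in this MM uncacheable via adjust_pte(), then
 * flush or fix up the faulting alias itself.
 */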
static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{
        struct address_space *mapping = page_mapping(page);
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt = NULL;
        struct prio_tree_iter iter;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
                                          &iter, pgoff, pgoff)) != NULL) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
        }
        flush_dcache_mmap_unlock(mapping);
        if (aliases)
                adjust_pte(vma, addr);
        else
                flush_cache_page(vma, addr);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the page_table_lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (!pfn_valid(pfn))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

                if (dirty)
                        __cpuc_flush_dcache_page(page_address(page));

                make_coherent(vma, addr, page, dirty);
        }
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

        local_irq_disable();
        mb();
        *p1 = one;              /* write 1 through the first mapping */
        mb();
        *p2 = zero;             /* write 0 to the same word via the alias */
        mb();
        val = *p1;              /* a coherent write buffer must return 0 */
        mb();
        local_irq_enable();

        return val != zero;     /* non-zero means the buffer aliased */
}

void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");
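
        /*
         * Strategy: map the same physical page at two virtual addresses
         * and have check_writebuffer() verify that a write through one
         * mapping is immediately visible through the other.  If it is
         * not, shared mappings must run with the write buffer disabled
         * as well.
         */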
        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
                                         L_PTE_DIRTY|L_PTE_WRITE|
                                         L_PTE_BUFFERABLE);

                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory\n";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else
                reason = "unable to grab page\n";

        if (v) {
                printk("failed, %s\n", reason);
                shared_pte_mask |= L_PTE_BUFFERABLE;
        } else
                printk("ok\n");
}