/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
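
/*
 * PTE bits that adjust_pte() clears to make a shared page uncacheable.
 * check_writebuffer_bugs() adds L_PTE_BUFFERABLE to this mask when the
 * write buffer cannot cope with aliased mappings.
 */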
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of the cache alias problem - when two user
 * mappings of the same page conflict, we simply make the PTE
 * uncacheable.  However, we leave the write buffer on.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, entry;
        int ret = 0;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        pte = pte_offset_map(pmd, address);
        entry = *pte;

        /*
         * If this page isn't present, or is already setup to
         * fault (ie, is old), we can safely ignore any issues.
         */
        if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
                flush_cache_page(vma, address);
                pte_val(entry) &= ~shared_pte_mask;
                set_pte(pte, entry);
                flush_tlb_page(vma, address);
                ret = 1;
        }
        pte_unmap(pte);

        return ret;
}
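
/*
 * Write back the kernel mapping of this page and, since the cache is
 * virtually indexed and tagged, also flush any user-space mappings of
 * the same page that belong to the current mm.
 */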
void __flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct mm_struct *mm = current->active_mm;
        struct list_head *l;

        __cpuc_flush_dcache_page(page_address(page));

        if (!mapping)
                return;

        /*
         * With a VIVT cache, we need to also write back
         * and invalidate any user data.
         */
        list_for_each(l, &mapping->i_mmap_shared) {
                struct vm_area_struct *mpnt;
                unsigned long off;

                mpnt = list_entry(l, struct vm_area_struct, shared);

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;

                if (page->index < mpnt->vm_pgoff)
                        continue;

                off = page->index - mpnt->vm_pgoff;
                if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
                        continue;

                flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
        }
}
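
/*
 * Walk the other shared mappings of this page within the faulting mm
 * and, via adjust_pte(), make any that overlap the page uncacheable so
 * that all virtual aliases of the page stay coherent.
 */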
static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{
        struct address_space *mapping = page_mapping(page);
        struct list_head *l;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long pgoff;
        int aliases = 0;

        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        list_for_each(l, &mapping->i_mmap_shared) {
                struct vm_area_struct *mpnt;
                unsigned long off;

                mpnt = list_entry(l, struct vm_area_struct, shared);

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;

                /*
                 * If the page isn't in this VMA, we can also ignore it.
                 */
                if (pgoff < mpnt->vm_pgoff)
                        continue;

                off = pgoff - mpnt->vm_pgoff;
                if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
                        continue;

                off = mpnt->vm_start + (off << PAGE_SHIFT);

                /*
                 * Ok, it is within mpnt.  Fix it up.
                 */
                aliases += adjust_pte(mpnt, off);
        }

        /*
         * If other mappings had to be made uncacheable, the faulting
         * mapping must be made uncacheable too; otherwise a cache
         * flush of the faulting page is sufficient.
         */
        if (aliases)
                adjust_pte(vma, addr);
        else
                flush_cache_page(vma, addr);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the page_table_lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping(page)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

                if (dirty)
                        __cpuc_flush_dcache_page(page_address(page));

                make_coherent(vma, addr, page, dirty);
        }
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

        /*
         * Write through one alias, then the other, and read the first
         * back; a stale value means the write buffer mishandles the
         * physical alias.
         */
        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();

        return val != zero;
}
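
/*
 * Map the same physical page at two different virtual addresses and
 * run the write buffer test on the pair.  If the test fails (or the
 * mappings cannot be set up at all), enable the work-around by also
 * marking shared pages unbufferable.
 */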
void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
                                         L_PTE_DIRTY|L_PTE_WRITE|
                                         L_PTE_BUFFERABLE);

                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory\n";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page\n";
        }

        if (v) {
                printk("failed, %s\n", reason);
                shared_pte_mask |= L_PTE_BUFFERABLE;
        } else {
                printk("ok\n");
        }
}