X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Farm%2Fmm%2Ffault-armv.c;h=cf95c5d0ce4cdb60f4ea317f07cde2c8de0764ba;hb=refs%2Fheads%2Fvserver;hp=5f0a790940ef735b98bfebad5b2c4e5ead60e363;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 5f0a79094..cf95c5d0c 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
@@ -54,9 +59,9 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
-		flush_cache_page(vma, address);
+		flush_cache_page(vma, address, pte_pfn(entry));
 		pte_val(entry) &= ~shared_pte_mask;
-		set_pte(pte, entry);
+		set_pte_at(vma->vm_mm, address, pte, entry);
 		flush_tlb_page(vma, address);
 		ret = 1;
 	}
@@ -77,9 +82,8 @@ no_pmd:
 }
 
 static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
-	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
 	struct prio_tree_iter iter;
@@ -87,9 +91,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	pgoff_t pgoff;
 	int aliases = 0;
 
-	if (!mapping)
-		return;
-
 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
 	/*
@@ -115,9 +116,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	if (aliases)
 		adjust_pte(vma, addr);
 	else
-		flush_cache_page(vma, addr);
+		flush_cache_page(vma, addr, pfn);
 }
 
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 /*
  * Take care of architecture specific things when placing a new PTE into
  * a page table, or changing an existing PTE.  Basically, there are two
@@ -129,34 +132,27 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
  * 2. If we have multiple shared mappings of the same space in
  *    an object, we need to deal with the cache aliasing issues.
  *
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
 	struct page *page;
 
 	if (!pfn_valid(pfn))
 		return;
+
 	page = pfn_to_page(pfn);
-	if (page_mapping(page)) {
+	mapping = page_mapping(page);
+	if (mapping) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			/*
-			 * This is our first userspace mapping of this page.
-			 * Ensure that the physical page is coherent with
-			 * the kernel mapping.
-			 *
-			 * FIXME: only need to do this on VIVT and aliasing
-			 *        VIPT cache architectures.  We can do that
-			 *        by choosing whether to set this bit...
-			 */
-			__cpuc_flush_dcache_page(page_address(page));
-		}
+		if (dirty)
+			__flush_dcache_page(mapping, page);
 		if (cache_is_vivt())
-			make_coherent(vma, addr, page, dirty);
+			make_coherent(mapping, vma, addr, pfn);
 	}
 }
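The hunks above are easiest to follow as the function they produce. The sketch below is reconstructed purely from the "+" and context lines of the final hunk; it is illustrative only, not copied from the target tree, and the comments are added here rather than taken from the file. It shows update_mmu_cache() as it reads once this diff is applied: the page's address_space is looked up once, the deferred D-cache flush goes through __flush_dcache_page() (declared earlier in this diff), and on VIVT caches the mapping and pfn are handed straight to make_coherent().

/*
 * Illustrative reconstruction of update_mmu_cache() after this diff.
 * Runs under the pte lock (see the adjust_pte() comment added above).
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
		/*
		 * PG_dcache_dirty set means a D-cache flush of the kernel
		 * mapping was deferred; do it now that a user mapping exists.
		 */
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_dcache_page(mapping, page);

		/* On VIVT caches, also fix up other user-space aliases. */
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
	}
}

Because adjust_pte() (reached via make_coherent()) modifies a pte other than the one update_mmu_cache() was invoked for, the lock the caller holds must cover that other pte as well; that is exactly why the comment added in the first hunk says configurations without CONFIG_CPU_CACHE_VIPT cannot support a split page_table_lock.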