2 * mm/rmap.c - physical to virtual reverse mappings
4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
5 * Released under the General Public License (GPL).
7 * Simple, low overhead reverse mapping scheme.
8 * Please try to keep this thing as modular as possible.
10 * Provides methods for unmapping each kind of mapped page:
11 * the anon methods track anonymous pages, and
12 * the file methods track pages belonging to an inode.
14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
22 * - the page->mapcount field is protected by the PG_maplock bit,
23 * which nests within the mm->page_table_lock,
24 * which nests within the page lock.
25 * - because swapout locking is opposite to the locking order
26 * in the page fault path, the swapout path uses trylocks
27 * on the mm->page_table_lock
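 *
 * Concretely: in the fault path (do_swap_page(), for example) the page
 * lock is taken first, then mm->page_table_lock, and page_add_anon_rmap()
 * takes PG_maplock innermost.  page_referenced() and try_to_unmap() below
 * are entered with PG_maplock already held, so taking mm->page_table_lock
 * there would invert that order; hence the spin_trylock() calls, which
 * simply give up on contention.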
30 #include <linux/pagemap.h>
31 #include <linux/swap.h>
32 #include <linux/swapops.h>
33 #include <linux/slab.h>
34 #include <linux/init.h>
35 #include <linux/rmap.h>
37 #include <asm/tlbflush.h>
39 //#define RMAP_DEBUG /* can be enabled only for debugging */
41 kmem_cache_t *anon_vma_cachep;
43 static inline void validate_anon_vma(struct vm_area_struct *find_vma)
46 struct anon_vma *anon_vma = find_vma->anon_vma;
47 struct vm_area_struct *vma;
48 unsigned int mapcount = 0;
51 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
53 BUG_ON(mapcount > 100000);
61 /* This must be called under the mmap_sem. */
62 int anon_vma_prepare(struct vm_area_struct *vma)
64 struct anon_vma *anon_vma = vma->anon_vma;
67 if (unlikely(!anon_vma)) {
68 struct mm_struct *mm = vma->vm_mm;
69 struct anon_vma *allocated = NULL;
71 anon_vma = find_mergeable_anon_vma(vma);
73 anon_vma = anon_vma_alloc();
74 if (unlikely(!anon_vma))
79 /* page_table_lock to protect against threads */
80 spin_lock(&mm->page_table_lock);
81 if (likely(!vma->anon_vma)) {
82 vma->anon_vma = anon_vma;
83 list_add(&vma->anon_vma_node, &anon_vma->head);
86 spin_unlock(&mm->page_table_lock);
87 if (unlikely(allocated))
88 anon_vma_free(allocated);
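
/*
 * A typical anonymous-fault sequence is, roughly (a sketch only; see
 * do_anonymous_page() in mm/memory.c for the real thing):
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 *	...
 *	page_add_anon_rmap(page, vma, addr);
 */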
93 void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
96 BUG_ON(!next->anon_vma);
97 vma->anon_vma = next->anon_vma;
98 list_add(&vma->anon_vma_node, &next->anon_vma_node);
100 /* if they're both non-null they must be the same */
101 BUG_ON(vma->anon_vma != next->anon_vma);
103 list_del(&next->anon_vma_node);
106 void __anon_vma_link(struct vm_area_struct *vma)
108 struct anon_vma *anon_vma = vma->anon_vma;
111 list_add(&vma->anon_vma_node, &anon_vma->head);
112 validate_anon_vma(vma);
116 void anon_vma_link(struct vm_area_struct *vma)
118 struct anon_vma *anon_vma = vma->anon_vma;
121 spin_lock(&anon_vma->lock);
122 list_add(&vma->anon_vma_node, &anon_vma->head);
123 validate_anon_vma(vma);
124 spin_unlock(&anon_vma->lock);
128 void anon_vma_unlink(struct vm_area_struct *vma)
130 struct anon_vma *anon_vma = vma->anon_vma;
136 spin_lock(&anon_vma->lock);
137 validate_anon_vma(vma);
138 list_del(&vma->anon_vma_node);
140 /* We must garbage collect the anon_vma if it's empty */
141 empty = list_empty(&anon_vma->head);
142 spin_unlock(&anon_vma->lock);
145 anon_vma_free(anon_vma);
148 static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
150 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
151 SLAB_CTOR_CONSTRUCTOR) {
152 struct anon_vma *anon_vma = data;
154 spin_lock_init(&anon_vma->lock);
155 INIT_LIST_HEAD(&anon_vma->head);
159 void __init anon_vma_init(void)
161 anon_vma_cachep = kmem_cache_create("anon_vma",
162 sizeof(struct anon_vma), 0, SLAB_PANIC, anon_vma_ctor, NULL);
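
/*
 * The slab constructor runs when a slab page is first populated, so every
 * anon_vma returned by anon_vma_alloc() already has its lock and list head
 * initialized.  anon_vma_unlink() above only frees an anon_vma once its
 * list is empty again, which leaves freed objects in the constructed state.
 */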
165 /* this must be called with the page's PG_maplock (rmap lock) held */
166 static inline void clear_page_anon(struct page *page)
168 BUG_ON(!page->mapping);
169 page->mapping = NULL;
174 * At what user virtual address is page expected in vma?
176 static inline unsigned long
177 vma_address(struct page *page, struct vm_area_struct *vma)
179 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
180 unsigned long address;
182 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
183 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
184 /* page should be within any vma from prio_tree_next */
185 BUG_ON(!PageAnon(page));
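
/*
 * Worked example with 4K pages (PAGE_CACHE_SHIFT == PAGE_SHIFT, so pgoff
 * equals page->index): a page with ->index 100, mapped by a vma with
 * vm_pgoff 96 and vm_start 0x08048000, is expected at
 * 0x08048000 + ((100 - 96) << PAGE_SHIFT) = 0x0804c000.
 */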
192 * Subfunctions of page_referenced: page_referenced_one called
193 * repeatedly from either page_referenced_anon or page_referenced_file.
195 static int page_referenced_one(struct page *page,
196 struct vm_area_struct *vma, unsigned int *mapcount)
198 struct mm_struct *mm = vma->vm_mm;
199 unsigned long address;
207 address = vma_address(page, vma);
208 if (address == -EFAULT)
211 if (!spin_trylock(&mm->page_table_lock))
214 pgd = pgd_offset(mm, address);
215 if (!pgd_present(*pgd))
218 pmd = pmd_offset(pgd, address);
219 if (!pmd_present(*pmd))
222 pte = pte_offset_map(pmd, address);
223 if (!pte_present(*pte))
226 if (page_to_pfn(page) != pte_pfn(*pte))
229 if (ptep_test_and_clear_young(pte))
237 spin_unlock(&mm->page_table_lock);
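
/*
 * The *mapcount argument counts down as mapping ptes are found, so
 * page_referenced_anon() and page_referenced_file() can stop walking
 * their vma lists as soon as every pte mapping the page has been seen.
 */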
242 static inline int page_referenced_anon(struct page *page)
244 unsigned int mapcount = page->mapcount;
245 struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
246 struct vm_area_struct *vma;
249 spin_lock(&anon_vma->lock);
250 BUG_ON(list_empty(&anon_vma->head));
251 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
252 referenced += page_referenced_one(page, vma, &mapcount);
256 spin_unlock(&anon_vma->lock);
261 * page_referenced_file - referenced check for object-based rmap
262 * @page: the page we're checking references on.
264 * For an object-based mapped page, find all the places it is mapped and
265 * check/clear the referenced flag. This is done by following the page->mapping
266 * pointer, then walking the chain of vmas it holds. It returns the number
267 * of references it found.
269 * This function is only called from page_referenced for object-based pages.
271 * The spinlock address_space->i_mmap_lock is tried. If it can't be gotten,
272 * assume a reference count of 0, so try_to_unmap will then have a go.
274 static inline int page_referenced_file(struct page *page)
276 unsigned int mapcount = page->mapcount;
277 struct address_space *mapping = page->mapping;
278 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
279 struct vm_area_struct *vma = NULL;
280 struct prio_tree_iter iter;
283 if (!spin_trylock(&mapping->i_mmap_lock))
286 while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
287 &iter, pgoff, pgoff)) != NULL) {
288 if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
289 == (VM_LOCKED|VM_MAYSHARE)) {
293 referenced += page_referenced_one(page, vma, &mapcount);
298 spin_unlock(&mapping->i_mmap_lock);
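
/*
 * Note the special case above: a vma that is both VM_LOCKED and
 * VM_MAYSHARE is treated as referenced without probing its ptes, since an
 * mlocked shared mapping pins the page and reclaiming it would be
 * pointless.
 */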
303 * page_referenced - test if the page was referenced
304 * @page: the page to test
306 * Quick test_and_clear_referenced for all mappings to a page,
307 * returns the number of ptes which referenced the page.
308 * Caller needs to hold the rmap lock.
310 int page_referenced(struct page *page)
314 if (page_test_and_clear_young(page))
317 if (TestClearPageReferenced(page))
320 if (page->mapcount && page->mapping) {
322 referenced += page_referenced_anon(page);
324 referenced += page_referenced_file(page);
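
/*
 * The LRU scan in mm/vmscan.c is the expected caller: it takes the rmap
 * lock, asks page_referenced() whether any pte touched the page recently,
 * and uses the count to decide between keeping the page active and trying
 * to reclaim it.
 */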
330 * page_add_anon_rmap - add pte mapping to an anonymous page
331 * @page: the page to add the mapping to
332 * @vma: the vm area in which the mapping is added
333 * @address: the user virtual address mapped
335 * The caller needs to hold the mm->page_table_lock.
337 void page_add_anon_rmap(struct page *page,
338 struct vm_area_struct *vma, unsigned long address)
340 struct anon_vma *anon_vma = vma->anon_vma;
343 BUG_ON(PageReserved(page));
346 index = (address - vma->vm_start) >> PAGE_SHIFT;
347 index += vma->vm_pgoff;
348 index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
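
	/*
	 * This is the same computation as linear_page_index(vma, address):
	 * the anon page's ->index becomes the offset the page would have in
	 * a linear file mapping, which is what lets vma_address() above
	 * recover the user virtual address from the page later on.
	 */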
351 * Setting and clearing PG_anon must always happen inside
352 * page_map_lock to avoid races between mapping and
353 * unmapping by different processes of the same
354 * shared COW swapcache page. And while we hold the
355 * page_map_lock PG_anon cannot change from under us.
356 * PG_anon also cannot change under fork, because fork
357 * holds a reference on the page, so the page cannot be
358 * unmapped during fork; in turn, copy_page_range is
359 * allowed to read PG_anon outside the page_map_lock.
362 if (!page->mapcount) {
363 BUG_ON(PageAnon(page));
364 BUG_ON(page->mapping);
367 page->mapping = (struct address_space *) anon_vma;
368 inc_page_state(nr_mapped);
370 BUG_ON(!PageAnon(page));
371 BUG_ON(page->index != index);
372 BUG_ON(page->mapping != (struct address_space *) anon_vma);
375 page_map_unlock(page);
379 * page_add_file_rmap - add pte mapping to a file page
380 * @page: the page to add the mapping to
382 * The caller needs to hold the mm->page_table_lock.
384 void page_add_file_rmap(struct page *page)
386 BUG_ON(PageAnon(page));
387 if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
392 inc_page_state(nr_mapped);
394 page_map_unlock(page);
398 * page_remove_rmap - take down pte mapping from a page
399 * @page: page to remove mapping from
401 * Caller needs to hold the mm->page_table_lock.
403 void page_remove_rmap(struct page *page)
405 BUG_ON(PageReserved(page));
406 BUG_ON(!page->mapcount);
410 if (!page->mapcount) {
411 if (page_test_and_clear_dirty(page))
412 set_page_dirty(page);
414 clear_page_anon(page);
415 dec_page_state(nr_mapped);
417 page_map_unlock(page);
421 * Subfunctions of try_to_unmap: try_to_unmap_one called
422 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
424 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
426 struct mm_struct *mm = vma->vm_mm;
427 unsigned long address;
432 int ret = SWAP_AGAIN;
436 address = vma_address(page, vma);
437 if (address == -EFAULT)
441 * We need the page_table_lock to protect us from page faults,
442 * munmap, fork, etc...
444 if (!spin_trylock(&mm->page_table_lock))
447 pgd = pgd_offset(mm, address);
448 if (!pgd_present(*pgd))
451 pmd = pmd_offset(pgd, address);
452 if (!pmd_present(*pmd))
455 pte = pte_offset_map(pmd, address);
456 if (!pte_present(*pte))
459 if (page_to_pfn(page) != pte_pfn(*pte))
463 * If the page is mlock()d, we cannot swap it out.
464 * If it's recently referenced (perhaps page_referenced
465 * skipped over this mm) then we should reactivate it.
467 if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
468 ptep_test_and_clear_young(pte)) {
474 * Don't pull an anonymous page out from under get_user_pages.
475 * GUP carefully breaks COW and raises page count (while holding
476 * page_table_lock, as we have here) to make sure that the page
477 * cannot be freed. If we unmap that page here, a user write
478 * access to the virtual address will bring back the page, but
479 * its raised count will (ironically) be taken to mean it's not
480 * an exclusive swap page, so do_wp_page will replace it with a copy
481 * page, and the user never gets to see the data GUP was holding
482 * the original page for.
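 *
 * For a page in the swap cache the expected reference count is one per
 * mapping pte (page->mapcount), plus one for the swap cache itself, plus
 * the single reference held by the pageout code that isolated the page
 * from the LRU; the "+ 2" below checks exactly that, and anything higher
 * is presumed to be a get_user_pages() pin.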
484 if (PageSwapCache(page) &&
485 page_count(page) != page->mapcount + 2) {
490 /* Nuke the page table entry. */
491 flush_cache_page(vma, address);
492 pteval = ptep_clear_flush(vma, address, pte);
494 /* Move the dirty bit to the physical page now the pte is gone. */
495 if (pte_dirty(pteval))
496 set_page_dirty(page);
498 if (PageAnon(page)) {
499 swp_entry_t entry = { .val = page->private };
501 * Store the swap location in the pte.
502 * See handle_pte_fault() ...
504 BUG_ON(!PageSwapCache(page));
505 swap_duplicate(entry);
506 set_pte(pte, swp_entry_to_pte(entry));
507 BUG_ON(pte_file(*pte));
511 BUG_ON(!page->mapcount);
513 page_cache_release(page);
518 spin_unlock(&mm->page_table_lock);
524 * objrmap doesn't work for nonlinear VMAs because the assumption that
525 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
526 * Consequently, given a particular page and its ->index, we cannot locate the
527 * ptes which are mapping that page without an exhaustive linear search.
529 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
530 * maps the file to which the target page belongs. The ->vm_private_data field
531 * holds the current cursor into that scan. Successive searches will circulate
532 * around the vma's virtual address space.
534 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
535 * more scanning pressure is placed against them as well. Eventually pages
536 * will become fully unmapped and eligible for eviction.
538 * For very sparsely populated VMAs this is a little inefficient - chances are
539 * there won't be many ptes located within the scan cluster. In this case
540 * maybe we could scan further - to the end of the pte page, perhaps.
542 #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
543 #define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
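
/*
 * With 4K pages CLUSTER_SIZE is min(128KB, PMD_SIZE), i.e. 32 ptes per
 * try_to_unmap_cluster() call on most configurations, and CLUSTER_MASK
 * rounds the starting address down to a cluster boundary, so successive
 * cursor values walk a vma in aligned 32-pte chunks.
 */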
545 static int try_to_unmap_cluster(unsigned long cursor,
546 unsigned int *mapcount, struct vm_area_struct *vma)
548 struct mm_struct *mm = vma->vm_mm;
554 unsigned long address;
559 * We need the page_table_lock to protect us from page faults,
560 * munmap, fork, etc...
562 if (!spin_trylock(&mm->page_table_lock))
565 address = (vma->vm_start + cursor) & CLUSTER_MASK;
566 end = address + CLUSTER_SIZE;
567 if (address < vma->vm_start)
568 address = vma->vm_start;
569 if (end > vma->vm_end)
572 pgd = pgd_offset(mm, address);
573 if (!pgd_present(*pgd))
576 pmd = pmd_offset(pgd, address);
577 if (!pmd_present(*pmd))
580 for (pte = pte_offset_map(pmd, address);
581 address < end; pte++, address += PAGE_SIZE) {
583 if (!pte_present(*pte))
590 page = pfn_to_page(pfn);
591 BUG_ON(PageAnon(page));
592 if (PageReserved(page))
595 if (ptep_test_and_clear_young(pte))
598 /* Nuke the page table entry. */
599 flush_cache_page(vma, address);
600 pteval = ptep_clear_flush(vma, address, pte);
602 /* If nonlinear, store the file page offset in the pte. */
603 if (page->index != linear_page_index(vma, address))
604 set_pte(pte, pgoff_to_pte(page->index));
606 /* Move the dirty bit to the physical page now the pte is gone. */
607 if (pte_dirty(pteval))
608 set_page_dirty(page);
610 page_remove_rmap(page);
611 page_cache_release(page);
619 spin_unlock(&mm->page_table_lock);
623 static inline int try_to_unmap_anon(struct page *page)
625 struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
626 struct vm_area_struct *vma;
627 int ret = SWAP_AGAIN;
629 spin_lock(&anon_vma->lock);
630 BUG_ON(list_empty(&anon_vma->head));
631 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
632 ret = try_to_unmap_one(page, vma);
633 if (ret == SWAP_FAIL || !page->mapcount)
636 spin_unlock(&anon_vma->lock);
641 * try_to_unmap_file - unmap file page using the object-based rmap method
642 * @page: the page to unmap
644 * Find all the mappings of a page using the mapping pointer and the vma chains
645 * contained in the address_space struct it points to.
647 * This function is only called from try_to_unmap for object-based pages.
649 * The spinlock address_space->i_mmap_lock is tried. If it can't be gotten,
650 * return a temporary error.
652 static inline int try_to_unmap_file(struct page *page)
654 struct address_space *mapping = page->mapping;
655 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
656 struct vm_area_struct *vma = NULL;
657 struct prio_tree_iter iter;
658 int ret = SWAP_AGAIN;
659 unsigned long cursor;
660 unsigned long max_nl_cursor = 0;
661 unsigned long max_nl_size = 0;
662 unsigned int mapcount;
664 if (!spin_trylock(&mapping->i_mmap_lock))
667 while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
668 &iter, pgoff, pgoff)) != NULL) {
669 ret = try_to_unmap_one(page, vma);
670 if (ret == SWAP_FAIL || !page->mapcount)
674 if (list_empty(&mapping->i_mmap_nonlinear))
677 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
678 shared.vm_set.list) {
679 if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
681 cursor = (unsigned long) vma->vm_private_data;
682 if (cursor > max_nl_cursor)
683 max_nl_cursor = cursor;
684 cursor = vma->vm_end - vma->vm_start;
685 if (cursor > max_nl_size)
686 max_nl_size = cursor;
689 if (max_nl_size == 0) /* all nonlinear vmas are locked or reserved */
693 * We don't try to search for this page in the nonlinear vmas,
694 * and page_referenced wouldn't have found it anyway. Instead
695 * just walk the nonlinear vmas trying to age and unmap some.
696 * The mapcount of the page we came in with is irrelevant,
697 * but even so use it as a guide to how hard we should try?
699 mapcount = page->mapcount;
700 page_map_unlock(page);
701 cond_resched_lock(&mapping->i_mmap_lock);
703 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
704 if (max_nl_cursor == 0)
705 max_nl_cursor = CLUSTER_SIZE;
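
	/*
	 * Walk the nonlinear vmas in rounds: each pass scans every vma from
	 * its saved cursor up to max_nl_cursor, then max_nl_cursor grows by
	 * one CLUSTER_SIZE and the list is walked again, until it covers
	 * max_nl_size (the span of the largest nonlinear vma).  Small vmas
	 * therefore finish early while large ones keep being nibbled at.
	 */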
708 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
709 shared.vm_set.list) {
710 if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
712 cursor = (unsigned long) vma->vm_private_data;
713 while (vma->vm_mm->rss &&
714 cursor < max_nl_cursor &&
715 cursor < vma->vm_end - vma->vm_start) {
716 ret = try_to_unmap_cluster(
717 cursor, &mapcount, vma);
718 if (ret == SWAP_FAIL)
720 cursor += CLUSTER_SIZE;
721 vma->vm_private_data = (void *) cursor;
722 if ((int)mapcount <= 0)
725 if (ret != SWAP_FAIL)
726 vma->vm_private_data =
727 (void *) max_nl_cursor;
730 cond_resched_lock(&mapping->i_mmap_lock);
731 max_nl_cursor += CLUSTER_SIZE;
732 } while (max_nl_cursor <= max_nl_size);
735 * Don't loop forever (perhaps all the remaining pages are
736 * in locked vmas). Reset cursor on all unreserved nonlinear
737 * vmas, now forgetting on which ones it had fallen behind.
739 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
740 shared.vm_set.list) {
741 if (!(vma->vm_flags & VM_RESERVED))
742 vma->vm_private_data = NULL;
747 spin_unlock(&mapping->i_mmap_lock);
752 * try_to_unmap - try to remove all page table mappings to a page
753 * @page: the page to get unmapped
755 * Tries to remove all the page table entries which are mapping this
756 * page, used in the pageout path. Caller must hold the page lock
757 * and its rmap lock. Return values are:
759 * SWAP_SUCCESS - we succeeded in removing all mappings
760 * SWAP_AGAIN - we missed a trylock, try again later
761 * SWAP_FAIL - the page is unswappable
763 int try_to_unmap(struct page *page)
767 BUG_ON(PageReserved(page));
768 BUG_ON(!PageLocked(page));
769 BUG_ON(!page->mapcount);
772 ret = try_to_unmap_anon(page);
774 ret = try_to_unmap_file(page);
776 if (!page->mapcount) {
777 if (page_test_and_clear_dirty(page))
778 set_page_dirty(page);
780 clear_page_anon(page);
781 dec_page_state(nr_mapped);
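
/*
 * The expected caller is the pageout path (shrink_list() in mm/vmscan.c),
 * which holds the page lock and the rmap lock and does, roughly:
 *
 *	switch (try_to_unmap(page)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;	-- keep the page active
 *	case SWAP_AGAIN:
 *		goto keep_locked;	-- missed a trylock, retry later
 *	case SWAP_SUCCESS:
 *		;			-- fully unmapped, go on to free it
 *	}
 *
 * (a sketch only; the real labels and lock handling live in mm/vmscan.c)
 */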