/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Locking: see "Lock ordering" summary in filemap.c.
 * In swapout, page_map_lock is held on entry to page_referenced and
 * try_to_unmap, so they trylock for i_mmap_lock and page_table_lock.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/vs_memory.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */
kmem_cache_t *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated = NULL;

		anon_vma = find_mergeable_anon_vma(vma);
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			if (!allocated)
				spin_lock(&anon_vma->lock);
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			if (!allocated)
				spin_unlock(&anon_vma->lock);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	if (!vma->anon_vma) {
		BUG_ON(!next->anon_vma);
		vma->anon_vma = next->anon_vma;
		list_add(&vma->anon_vma_node, &next->anon_vma_node);
	} else {
		/* if they're both non-null they must be the same */
		BUG_ON(vma->anon_vma != next->anon_vma);
	}
	list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}
void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma",
		sizeof(struct anon_vma), 0, SLAB_PANIC, anon_vma_ctor, NULL);
}
/* this needs the page->flags PG_maplock held */
static inline void clear_page_anon(struct page *page)
{
	BUG_ON(!page->mapping);
	page->mapping = NULL;
	ClearPageAnon(page);
}
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
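/*
 * Worked example (illustrative numbers, not from the original source): with
 * PAGE_SHIFT == PAGE_CACHE_SHIFT == 12, a vma with vm_start 0x08048000 and
 * vm_pgoff 0x10 maps a page with index 0x13 at
 *	0x08048000 + ((0x13 - 0x10) << 12) = 0x0804b000,
 * which is inside the vma, so that address is returned.  A page whose index
 * falls outside the range covered by the vma fails the bounds check above
 * and the caller sees -EFAULT instead.
 */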
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int referenced = 0;

	if (!mm->rss)
		goto out;
	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	if (!spin_trylock(&mm->page_table_lock))
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	pte = pte_offset_map(pmd, address);
	if (!pte_present(*pte))
		goto out_unmap;

	if (page_to_pfn(page) != pte_pfn(*pte))
		goto out_unmap;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	if (mm != current->mm && has_swap_token(mm))
		referenced++;

	(*mapcount)--;

out_unmap:
	pte_unmap(pte);
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return referenced;
}
static inline int page_referenced_anon(struct page *page)
{
	unsigned int mapcount = page->mapcount;
	struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
	struct vm_area_struct *vma;
	int referenced = 0;

	spin_lock(&anon_vma->lock);
	BUG_ON(list_empty(&anon_vma->head));
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the
 * page->mapping pointer, then walking the chain of vmas it holds.
 * It returns the number of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 *
 * The spinlock address_space->i_mmap_lock is tried.  If it can't be gotten,
 * assume a reference count of 0, so try_to_unmap will then have a go.
 */
static inline int page_referenced_file(struct page *page)
{
	unsigned int mapcount = page->mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma = NULL;
	struct prio_tree_iter iter;
	int referenced = 0;

	if (!spin_trylock(&mapping->i_mmap_lock))
		return 0;

	while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
					&iter, pgoff, pgoff)) != NULL) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 * Caller needs to hold the rmap lock.
 */
int page_referenced(struct page *page)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page->mapcount && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else
			referenced += page_referenced_file(page);
	}
	return referenced;
}
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	pgoff_t index;

	BUG_ON(PageReserved(page));
	BUG_ON(!anon_vma);

	index = (address - vma->vm_start) >> PAGE_SHIFT;
	index += vma->vm_pgoff;
	index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

	/*
	 * Setting and clearing PG_anon must always happen inside
	 * page_map_lock to avoid races between mapping and
	 * unmapping on different processes of the same
	 * shared cow swapcache page.  And while we take the
	 * page_map_lock PG_anon cannot change from under us.
	 * Actually PG_anon cannot change under fork either
	 * since fork holds a reference on the page so it cannot
	 * be unmapped under fork and in turn copy_page_range is
	 * allowed to read PG_anon outside the page_map_lock.
	 */
	page_map_lock(page);
	if (!page->mapcount) {
		BUG_ON(PageAnon(page));
		BUG_ON(page->mapping);
		SetPageAnon(page);
		page->index = index;
		page->mapping = (struct address_space *) anon_vma;
		inc_page_state(nr_mapped);
	} else {
		BUG_ON(!PageAnon(page));
		BUG_ON(page->index != index);
		BUG_ON(page->mapping != (struct address_space *) anon_vma);
	}
	page->mapcount++;
	page_map_unlock(page);
}
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
		return;

	page_map_lock(page);
	if (!page->mapcount)
		inc_page_state(nr_mapped);
	page->mapcount++;
	page_map_unlock(page);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * Caller needs to hold the mm->page_table_lock.
 */
void page_remove_rmap(struct page *page)
{
	BUG_ON(PageReserved(page));
	BUG_ON(!page->mapcount);

	page_map_lock(page);
	page->mapcount--;
	if (!page->mapcount) {
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		if (PageAnon(page))
			clear_page_anon(page);
		dec_page_state(nr_mapped);
	}
	page_map_unlock(page);
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	int ret = SWAP_AGAIN;

	if (!mm->rss)
		goto out;
	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	if (!spin_trylock(&mm->page_table_lock))
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	pte = pte_offset_map(pmd, address);
	if (!pte_present(*pte))
		goto out_unmap;

	if (page_to_pfn(page) != pte_pfn(*pte))
		goto out_unmap;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/*
	 * Don't pull an anonymous page out from under get_user_pages.
	 * GUP carefully breaks COW and raises page count (while holding
	 * page_table_lock, as we have here) to make sure that the page
	 * cannot be freed.  If we unmap that page here, a user write
	 * access to the virtual address will bring back the page, but
	 * its raised count will (ironically) be taken to mean it's not
	 * an exclusive swap page, do_wp_page will replace it by a copy
	 * page, and the user never gets to see the data GUP was holding
	 * the original page for.
	 *
	 * This test is also useful for when swapoff (unuse_process) has
	 * to drop page lock: its reference to the page stops existing
	 * ptes from being unmapped, so swapoff can make progress.
	 */
	if (PageSwapCache(page) &&
	    page_count(page) != page->mapcount + 2) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address);
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page->private };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		set_pte(pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	}

	mm->rss--;
	BUG_ON(!page->mapcount);
	page->mapcount--;
	page_cache_release(page);

out_unmap:
	pte_unmap(pte);
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
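/*
 * Worked example (illustrative numbers, not from the original source): with
 * 4K pages, CLUSTER_SIZE is min(128KB, PMD_SIZE), i.e. 128KB (0x20000) on
 * typical x86 configurations, so CLUSTER_MASK clears the low 17 bits.  For a
 * vma starting at 0x40000000 and a cursor of 0x23000, try_to_unmap_cluster()
 * below scans the window
 *	address = (0x40000000 + 0x23000) & ~0x1ffff = 0x40020000
 *	end     = address + 0x20000                 = 0x40040000
 * that is, 32 ptes covering one 128KB-aligned slice of the vma.
 */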
static int try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	struct page *page;
	unsigned long address;
	unsigned long end;
	unsigned long pfn;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	if (!spin_trylock(&mm->page_table_lock))
		return SWAP_FAIL;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	for (pte = pte_offset_map(pmd, address);
			address < end; pte++, address += PAGE_SIZE) {

		if (!pte_present(*pte))
			continue;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		BUG_ON(PageAnon(page));
		if (PageReserved(page))
			continue;

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address);
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte(pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		mm->rss--;
		(*mapcount)--;
	}

	pte_unmap(pte);

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return SWAP_AGAIN;
}
static inline int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	spin_lock(&anon_vma->lock);
	BUG_ON(list_empty(&anon_vma->head));
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page->mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 *
 * The spinlock address_space->i_mmap_lock is tried.  If it can't be gotten,
 * return a temporary error.
 */
static inline int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma = NULL;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	if (!spin_trylock(&mapping->i_mmap_lock))
		return ret;

	while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
					&iter, pgoff, pgoff)) != NULL) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page->mapcount)
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0)	/* any nonlinears locked or reserved */
		goto out;

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page->mapcount;
	page_map_unlock(page);
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (vma->vm_mm->rss &&
				cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				ret = try_to_unmap_cluster(
						cursor, &mapcount, vma);
				if (ret == SWAP_FAIL)
					break;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto relock;
			}
			if (ret != SWAP_FAIL)
				vma->vm_private_data =
					(void *) max_nl_cursor;
			ret = SWAP_AGAIN;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (!(vma->vm_flags & VM_RESERVED))
			vma->vm_private_data = NULL;
	}
relock:
	page_map_lock(page);
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock
 * and its rmap lock.  Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a trylock, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(PageReserved(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(!page->mapcount);

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page->mapcount) {
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		if (PageAnon(page))
			clear_page_anon(page);
		dec_page_state(nr_mapped);
		ret = SWAP_SUCCESS;
	}
	return ret;
}
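/*
 * Typical caller pattern (a paraphrased sketch of the pageout path in
 * mm/vmscan.c, not verbatim from this kernel source): shrink_list() calls
 * try_to_unmap() on each mapped page it wants to reclaim, roughly as
 *
 *	switch (try_to_unmap(page)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;
 *	}
 *
 * i.e. SWAP_FAIL sends the page back to the active list, SWAP_AGAIN keeps
 * it for a later pass, and only SWAP_SUCCESS proceeds to pageout and free.
 */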