/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * page->flags PG_locked (lock_page)
 *   mapping->i_mmap_lock
 *     zone->lru_lock (in mark_page_accessed)
 *     swap_list_lock (in swap_free etc's swap_info_get)
 *     mmlist_lock (in mmput, drain_mmlist and others)
 *     swap_device_lock (in swap_duplicate, swap_info_get)
 *     mapping->private_lock (in __set_page_dirty_buffers)
 *     inode_lock (in set_page_dirty's __mark_inode_dirty)
 *       sb_lock (within inode_lock in fs/fs-writeback.c)
 *       mapping->tree_lock (widely used, in set_page_dirty,
 *         in arch-dependent flush_dcache_mmap_lock,
 *         within inode_lock in __sync_single_inode)
 */
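/*
 * Illustrative sketch (added commentary, not from the original file): a
 * pageout-path caller that already holds the page lock takes the locks
 * used below in this nesting order, never the reverse:
 *
 *	lock_page(page);			// page->flags PG_locked
 *	spin_lock(&mapping->i_mmap_lock);	// mapping->i_mmap_lock
 *	spin_lock(&mm->page_table_lock);	// guards the page tables
 *	... inspect or clear ptes ...
 *	spin_unlock(&mm->page_table_lock);
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 *
 * page_referenced_file() and try_to_unmap_file() below follow this pattern,
 * taking the page table lock inside i_mmap_lock via page_check_address().
 */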
#include <linux/mm.h>	/* added: the mm/vma types used throughout need this */
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/vs_memory.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                BUG_ON(mapcount > 100000);
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated, *locked;

                anon_vma = find_mergeable_anon_vma(vma);
                        spin_lock(&locked->lock);
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                        allocated = anon_vma;

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add(&vma->anon_vma_node, &anon_vma->head);
                spin_unlock(&mm->page_table_lock);

                spin_unlock(&locked->lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
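/*
 * Added note on anon_vma_prepare(): between find_mergeable_anon_vma() or
 * anon_vma_alloc() and taking page_table_lock, another thread may already
 * have installed vma->anon_vma; the recheck under page_table_lock catches
 * that case, and the unlikely(allocated) test at the end frees the now
 * unused speculative allocation.
 */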
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);

void __anon_vma_link(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        list_add(&vma->anon_vma_node, &anon_vma->head);
        validate_anon_vma(vma);

void anon_vma_link(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        spin_lock(&anon_vma->lock);
        list_add(&vma->anon_vma_node, &anon_vma->head);
        validate_anon_vma(vma);
        spin_unlock(&anon_vma->lock);

void anon_vma_unlink(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        spin_lock(&anon_vma->lock);
        validate_anon_vma(vma);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

                anon_vma_free(anon_vma);

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                                                SLAB_CTOR_CONSTRUCTOR) {
                struct anon_vma *anon_vma = data;

                spin_lock_init(&anon_vma->lock);
                INIT_LIST_HEAD(&anon_vma->head);

void __init anon_vma_init(void)
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
        if (!page_mapped(page))

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
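/*
 * Added note: this is only safe because anon_vma structures come from a
 * SLAB_DESTROY_BY_RCU cache (see anon_vma_init above), so as long as the
 * caller is inside an RCU read-side critical section the memory behind
 * page->mapping cannot be returned to the page allocator and the embedded
 * spinlock remains valid even if the anon_vma is concurrently freed and
 * reused; the page_mapped() check skips pages with no mappings left.
 */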
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within any vma from prio_tree_next */
                BUG_ON(!PageAnon(page));
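/*
 * Worked example for vma_address() (illustrative numbers, not from the
 * source): with 4K pages, a vma with vm_start == 0x08100000 and
 * vm_pgoff == 0x10, asked about a page whose index is 0x12, yields
 *
 *	address = 0x08100000 + ((0x12 - 0x10) << 12) = 0x08102000
 *
 * i.e. the page is expected two pages into the vma.
 */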
/*
 * At what user virtual address is page expected in vma? Also checks that
 * the page actually matches the vma: currently only used by unuse_process,
 * on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
        if (PageAnon(page)) {
                if ((void *)vma->anon_vma !=
                    (void *)page->mapping - PAGE_MAPPING_ANON)
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (vma->vm_file->f_mapping != page->mapping)
        return vma_address(page, vma);
/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with mapped pte and locked mm->page_table_lock.
 */
static pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                 unsigned long address)
        /*
         * We need the page_table_lock to protect us from page faults,
         * munmap, fork, etc...
         */
        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, address);
        if (likely(pgd_present(*pgd))) {
                pud = pud_offset(pgd, address);
                if (likely(pud_present(*pud))) {
                        pmd = pmd_offset(pud, address);
                        if (likely(pmd_present(*pmd))) {
                                pte = pte_offset_map(pmd, address);
                                if (likely(pte_present(*pte) &&
                                           page_to_pfn(page) == pte_pfn(*pte)))
        spin_unlock(&mm->page_table_lock);
        return ERR_PTR(-ENOENT);
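/*
 * Usage note (added commentary): on success the returned pte is still
 * mapped and mm->page_table_lock is still held, so callers such as
 * page_referenced_one() and try_to_unmap_one() are expected to pte_unmap()
 * and spin_unlock() when they are done; on failure the lock has already
 * been dropped and an ERR_PTR is returned, so check the result with IS_ERR().
 */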
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
        struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;

        if (!get_mm_counter(mm, rss))
        address = vma_address(page, vma);
        if (address == -EFAULT)

        pte = page_check_address(page, mm, address);

        if (ptep_clear_flush_young(vma, address, pte))

        if (mm != current->mm && !ignore_token && has_swap_token(mm))

        spin_unlock(&mm->page_table_lock);
static int page_referenced_anon(struct page *page, int ignore_token)
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;

        anon_vma = page_lock_anon_vma(page);

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                referenced += page_referenced_one(page, vma, &mapcount,
        spin_unlock(&anon_vma->lock);
/*
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page, int ignore_token)
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_lock.
         */
        BUG_ON(!PageLocked(page));

        spin_lock(&mapping->i_mmap_lock);

        /*
         * i_mmap_lock does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                referenced += page_referenced_one(page, vma, &mapcount,

        spin_unlock(&mapping->i_mmap_lock);
/*
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked, int ignore_token)
        if (!swap_token_default_timeout)

        if (page_test_and_clear_young(page))

        if (TestClearPageReferenced(page))

        if (page_mapped(page) && page->mapping) {
                        referenced += page_referenced_anon(page, ignore_token);
                        referenced += page_referenced_file(page, ignore_token);
                else if (TestSetPageLocked(page))
                                referenced += page_referenced_file(page,
/*
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(PageReserved(page));

        inc_mm_counter(vma->vm_mm, anon_rss);

        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        index = (address - vma->vm_start) >> PAGE_SHIFT;
        index += vma->vm_pgoff;
        index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

        if (atomic_inc_and_test(&page->_mapcount)) {
                page->mapping = (struct address_space *) anon_vma;
                inc_page_state(nr_mapped);
        /* else checking page index and mapping is racy */
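/*
 * Added note: page->_mapcount starts at -1 meaning "not mapped", so the
 * atomic_inc_and_test() above is true exactly when this is the first pte
 * to map the page; only that first mapper publishes page->mapping (tagged
 * with PAGE_MAPPING_ANON) and bumps the nr_mapped statistic.
 */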
/*
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_file_rmap(struct page *page)
        BUG_ON(PageAnon(page));
        if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))

        if (atomic_inc_and_test(&page->_mapcount))
                inc_page_state(nr_mapped);
/*
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * Caller needs to hold the mm->page_table_lock.
 */
void page_remove_rmap(struct page *page)
        BUG_ON(PageReserved(page));

        if (atomic_add_negative(-1, &page->_mapcount)) {
                BUG_ON(page_mapcount(page) < 0);
                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
                 * which increments mapcount after us but sets mapping
                 * before us: so leave the reset to free_hot_cold_page,
                 * and remember that it's only reliable while mapped.
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
                dec_page_state(nr_mapped);
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        int ret = SWAP_AGAIN;

        if (!get_mm_counter(mm, rss))
        address = vma_address(page, vma);
        if (address == -EFAULT)

        pte = page_check_address(page, mm, address);

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
                        ptep_clear_flush_young(vma, address, pte)) {

        /*
         * Don't pull an anonymous page out from under get_user_pages.
         * GUP carefully breaks COW and raises page count (while holding
         * page_table_lock, as we have here) to make sure that the page
         * cannot be freed. If we unmap that page here, a user write
         * access to the virtual address will bring back the page, but
         * its raised count will (ironically) be taken to mean it's not
         * an exclusive swap page, do_wp_page will replace it by a copy
         * page, and the user never gets to see the data GUP was holding
         * the original page for.
         *
         * This test is also useful for when swapoff (unuse_process) has
         * to drop page lock: its reference to the page stops existing
         * ptes from being unmapped, so swapoff can make progress.
         */
        if (PageSwapCache(page) &&
            page_count(page) != page_mapcount(page) + 2) {

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page->private };
                /*
                 * Store the swap location in the pte.
                 * See handle_pte_fault() ...
                 */
                BUG_ON(!PageSwapCache(page));
                swap_duplicate(entry);
                if (list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_add(&mm->mmlist, &init_mm.mmlist);
                        spin_unlock(&mmlist_lock);
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
                dec_mm_counter(mm, anon_rss);

        dec_mm_counter(mm, rss);
        page_remove_rmap(page);
        page_cache_release(page);

        spin_unlock(&mm->page_table_lock);
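/*
 * Added note: on the successful path above the pte has either been
 * replaced by a swap entry (anonymous page) or simply cleared (file page,
 * whose contents can be read back from backing store), and the mm's rss,
 * the page's mapcount and its page cache reference are all dropped to match.
 */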
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and be eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
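/*
 * Added note: with 4K pages a cluster is 32 pages (128kB); capping it at
 * PMD_SIZE and aligning the start with CLUSTER_MASK keeps every cluster
 * inside a single page table, so try_to_unmap_cluster() needs only one
 * pte_offset_map() per call.
 */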
static void try_to_unmap_cluster(unsigned long cursor,
        unsigned int *mapcount, struct vm_area_struct *vma)
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, *original_pte;
        unsigned long address;

        /*
         * We need the page_table_lock to protect us from page faults,
         * munmap, fork, etc...
         */
        spin_lock(&mm->page_table_lock);

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))

        for (original_pte = pte = pte_offset_map(pmd, address);
                        address < end; pte++, address += PAGE_SIZE) {

                if (!pte_present(*pte))

                page = pfn_to_page(pfn);
                BUG_ON(PageAnon(page));
                if (PageReserved(page))

                if (ptep_clear_flush_young(vma, address, pte))

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pfn);
                pteval = ptep_clear_flush(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page);
                page_cache_release(page);
                dec_mm_counter(mm, rss);

        pte_unmap(original_pte);

        spin_unlock(&mm->page_table_lock);
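/*
 * Added note: storing the file offset in the pte above (a "file pte")
 * matters because in a nonlinear vma the offset can no longer be derived
 * from the faulting address; a later fault recovers it from the pte (via
 * the arch's pte_to_pgoff helper) to find the right page of the file.
 */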
static int try_to_unmap_anon(struct page *page)
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        anon_vma = page_lock_anon_vma(page);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                ret = try_to_unmap_one(page, vma);
                if (ret == SWAP_FAIL || !page_mapped(page))
        spin_unlock(&anon_vma->lock);
/*
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                ret = try_to_unmap_one(page, vma);
                if (ret == SWAP_FAIL || !page_mapped(page))

        if (list_empty(&mapping->i_mmap_nonlinear))

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;

        if (max_nl_size == 0) {	/* any nonlinears locked or reserved */

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway. Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so, use it as a guide to how hard we should try.
         */
        mapcount = page_mapcount(page);

        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                        if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
                        cursor = (unsigned long) vma->vm_private_data;
                        while (get_mm_counter(vma->vm_mm, rss) &&
                                cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                                try_to_unmap_cluster(cursor, &mapcount, vma);
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                        vma->vm_private_data = (void *) max_nl_cursor;

                cond_resched_lock(&mapping->i_mmap_lock);
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas). Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (!(vma->vm_flags & VM_RESERVED))
                        vma->vm_private_data = NULL;

        spin_unlock(&mapping->i_mmap_lock);
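/*
 * Added note on the nonlinear sweep above: each vma remembers its scan
 * position in vm_private_data, the inner while loop unmaps clusters up to
 * the current max_nl_cursor, and the outer loop raises max_nl_cursor one
 * cluster at a time until it covers the largest nonlinear vma, so repeated
 * calls walk the whole of every nonlinear mapping instead of rescanning the
 * same start; the cursors are only reset once the full range has been swept.
 */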
/*
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
        BUG_ON(PageReserved(page));
        BUG_ON(!PageLocked(page));

        ret = try_to_unmap_anon(page);
        ret = try_to_unmap_file(page);

        if (!page_mapped(page))