/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Locking:
 * - the page->mapcount field is protected by the PG_maplock bit,
 *   which nests within the mm->page_table_lock,
 *   which nests within the page lock.
 * - because swapout locking is opposite to the locking order
 *   in the page fault path, the swapout path uses trylocks
 *   on the mm->page_table_lock.
 */
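/*
 * A hedged illustration of that ordering (added for clarity, not part of
 * the original comment): the fault path takes the locks outside-in, while
 * the swapout path already holds the inner rmap lock when it needs the
 * mm->page_table_lock, so it can only trylock:
 *
 *      fault path (e.g. an anonymous fault):
 *              spin_lock(&mm->page_table_lock);
 *              ...
 *              page_map_lock(page);                    correct nesting
 *
 *      swapout path (page lock and rmap lock already held by the caller):
 *              if (!spin_trylock(&mm->page_table_lock))
 *                      return SWAP_AGAIN;              blocking here could deadlock
 */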
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* enable only for debugging */

kmem_cache_t *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                BUG_ON(mapcount > 100000);

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated = NULL;

                anon_vma = find_mergeable_anon_vma(vma);
                if (!anon_vma) {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add(&vma->anon_vma_node, &anon_vma->head);
                spin_unlock(&mm->page_table_lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
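/*
 * Hedged usage sketch (the real caller lives in mm/memory.c; this is only
 * an illustration of the intended calling sequence, not code from this
 * file): the anonymous fault path prepares the anon_vma before it
 * installs the new pte and adds the reverse mapping.
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 *      page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 *      ...
 *      page_add_anon_rmap(page, vma, address);
 */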
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
        BUG_ON(!next->anon_vma);
        vma->anon_vma = next->anon_vma;
        list_add(&vma->anon_vma_node, &next->anon_vma_node);

        /* if they're both non-null they must be the same */
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);

void __anon_vma_link(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        list_add(&vma->anon_vma_node, &anon_vma->head);
        validate_anon_vma(vma);

void anon_vma_link(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        spin_lock(&anon_vma->lock);
        list_add(&vma->anon_vma_node, &anon_vma->head);
        validate_anon_vma(vma);
        spin_unlock(&anon_vma->lock);

void anon_vma_unlink(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;

        spin_lock(&anon_vma->lock);
        validate_anon_vma(vma);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

        anon_vma_free(anon_vma);

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                                                SLAB_CTOR_CONSTRUCTOR) {
                struct anon_vma *anon_vma = data;

                spin_lock_init(&anon_vma->lock);
                INIT_LIST_HEAD(&anon_vma->head);

void __init anon_vma_init(void)
        anon_vma_cachep = kmem_cache_create("anon_vma",
                sizeof(struct anon_vma), 0, SLAB_PANIC, anon_vma_ctor, NULL);
/* This needs the page's PG_maplock (in page->flags) held. */
static inline void clear_page_anon(struct page *page)
        BUG_ON(!page->mapping);
        page->mapping = NULL;

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within any vma from prio_tree_next */
                BUG_ON(!PageAnon(page));
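/*
 * Worked example (added for illustration, assuming the common case of
 * PAGE_CACHE_SHIFT == PAGE_SHIFT == 12): for a vma with
 * vm_start == 0x08048000 and vm_pgoff == 0x10, a page with index 0x13
 * gives pgoff == 0x13 and
 *      address = 0x08048000 + ((0x13 - 0x10) << 12) = 0x0804b000,
 * which passes the vm_start/vm_end range check above.
 */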
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
        struct vm_area_struct *vma, unsigned int *mapcount, int *failed)
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;

        address = vma_address(page, vma);
        if (address == -EFAULT)

        if (!spin_trylock(&mm->page_table_lock)) {
                /*
                 * For debug we're currently warning if not all found,
                 * but in this case that's expected: suppress warning.
                 */

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))

        pmd = pmd_offset(pgd, address);
        if (!pmd_present(*pmd))

        pte = pte_offset_map(pmd, address);
        if (!pte_present(*pte))

        if (page_to_pfn(page) != pte_pfn(*pte))

        if (ptep_test_and_clear_young(pte))

        spin_unlock(&mm->page_table_lock);

static inline int page_referenced_anon(struct page *page)
        unsigned int mapcount = page->mapcount;
        struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
        struct vm_area_struct *vma;

        spin_lock(&anon_vma->lock);
        BUG_ON(list_empty(&anon_vma->head));
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                referenced += page_referenced_one(page, vma,
        spin_unlock(&anon_vma->lock);
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 *
 * The spinlock address_space->i_mmap_lock is tried. If it can't be gotten,
 * assume a reference count of 0, so try_to_unmap will then have a go.
 */
static inline int page_referenced_file(struct page *page)
        unsigned int mapcount = page->mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma = NULL;
        struct prio_tree_iter iter;

        if (!spin_trylock(&mapping->i_mmap_lock))

        while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
                                        &iter, pgoff, pgoff)) != NULL) {
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {

                referenced += page_referenced_one(page, vma,

        if (list_empty(&mapping->i_mmap_nonlinear))

        spin_unlock(&mapping->i_mmap_lock);
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 * Caller needs to hold the rmap lock.
 */
int page_referenced(struct page *page)
        if (page_test_and_clear_young(page))

        if (TestClearPageReferenced(page))

        if (page->mapcount && page->mapping) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page);
                else
                        referenced += page_referenced_file(page);
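/*
 * Hedged sketch of how the reclaim scanner is expected to use this
 * (the real code is in mm/vmscan.c; the label name here is illustrative,
 * not defined in this file):
 *
 *      page_map_lock(page);
 *      referenced = page_referenced(page);
 *      page_map_unlock(page);
 *      if (referenced)
 *              goto activate_locked;   keep the page on the active list
 */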
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(PageReserved(page));

        index = (address - vma->vm_start) >> PAGE_SHIFT;
        index += vma->vm_pgoff;
        index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
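        /*
         * Worked example (illustrative, assuming PAGE_CACHE_SHIFT ==
         * PAGE_SHIFT and that anonymous vmas have vm_pgoff ==
         * vm_start >> PAGE_SHIFT, which is the usual setup): with
         * vm_start == 0xb7e00000, vm_pgoff == 0xb7e00 and a fault at
         * address == 0xb7e03000 this gives index == 3 + 0xb7e00 == 0xb7e03,
         * the virtual page frame number, which matches the page->index
         * value that vma_address() above later inverts.
         */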
        /*
         * Setting and clearing PG_anon must always happen inside
         * page_map_lock, to avoid races between mapping and
         * unmapping the same shared COW swapcache page from
         * different processes. While we hold page_map_lock,
         * PG_anon cannot change under us. PG_anon cannot change
         * under fork either, since fork holds a reference on the
         * page, so the page cannot be unmapped during fork, and
         * copy_page_range is therefore allowed to read PG_anon
         * outside the page_map_lock.
         */
        if (!page->mapcount) {
                BUG_ON(PageAnon(page));
                BUG_ON(page->mapping);
                page->mapping = (struct address_space *) anon_vma;
                inc_page_state(nr_mapped);
        } else {
                BUG_ON(!PageAnon(page));
                BUG_ON(page->index != index);
                BUG_ON(page->mapping != (struct address_space *) anon_vma);

        page_map_unlock(page);
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_file_rmap(struct page *page)
        BUG_ON(PageAnon(page));
        if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))

        inc_page_state(nr_mapped);
        page_map_unlock(page);

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * Caller needs to hold the mm->page_table_lock.
 */
void page_remove_rmap(struct page *page)
        BUG_ON(PageReserved(page));
        BUG_ON(!page->mapcount);

        if (!page->mapcount) {
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
                clear_page_anon(page);
                dec_page_state(nr_mapped);
        page_map_unlock(page);
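/*
 * Hedged sketch of the expected calling pattern (the pte-teardown paths in
 * mm/memory.c and try_to_unmap_cluster() below follow this shape; shown
 * only as an illustration, under the mm->page_table_lock):
 *
 *      pteval = ptep_clear_flush(vma, address, pte);
 *      if (pte_dirty(pteval))
 *              set_page_dirty(page);
 *      page_remove_rmap(page);
 *      page_cache_release(page);
 */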
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        int ret = SWAP_AGAIN;

        address = vma_address(page, vma);
        if (address == -EFAULT)

        /*
         * We need the page_table_lock to protect us from page faults,
         * munmap, fork, etc...
         */
        if (!spin_trylock(&mm->page_table_lock))

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))

        pmd = pmd_offset(pgd, address);
        if (!pmd_present(*pmd))

        pte = pte_offset_map(pmd, address);
        if (!pte_present(*pte))

        if (page_to_pfn(page) != pte_pfn(*pte))

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
                        ptep_test_and_clear_young(pte)) {

        /* Nuke the page table entry. */
        flush_cache_page(vma, address);
        pteval = ptep_clear_flush(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page->private };
                /*
                 * Store the swap location in the pte.
                 * See handle_pte_fault() ...
                 */
                BUG_ON(!PageSwapCache(page));
                swap_duplicate(entry);
                set_pte(pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));

        BUG_ON(!page->mapcount);

        page_cache_release(page);

        spin_unlock(&mm->page_table_lock);
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and be eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))
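/*
 * Worked example (added for illustration, assuming 4K pages and a 4M
 * PMD_SIZE): CLUSTER_SIZE == min(128K, 4M) == 128K, so
 * CLUSTER_MASK == ~0x1ffff. With vm_start == 0x40018000 and
 * cursor == 0x5000, try_to_unmap_cluster() below computes
 *      address = (0x40018000 + 0x5000) & ~0x1ffff = 0x40000000,
 *      end     = address + 128K                   = 0x40020000,
 * after which address is clamped back up to vm_start (0x40018000).
 */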
static int try_to_unmap_cluster(unsigned long cursor,
        unsigned int *mapcount, struct vm_area_struct *vma)
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;

        /*
         * We need the page_table_lock to protect us from page faults,
         * munmap, fork, etc...
         */
        if (!spin_trylock(&mm->page_table_lock))

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))

        pmd = pmd_offset(pgd, address);
        if (!pmd_present(*pmd))

        for (pte = pte_offset_map(pmd, address);
                        address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))

                page = pfn_to_page(pfn);
                BUG_ON(PageAnon(page));
                if (PageReserved(page))

                if (ptep_test_and_clear_young(pte))

                /* Nuke the page table entry. */
                flush_cache_page(vma, address);
                pteval = ptep_clear_flush(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte(pte, pgoff_to_pte(page->index));
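                /*
                 * Illustrative note (hedged, not from the original source):
                 * encoding page->index into the pte via pgoff_to_pte() is
                 * what lets a later fault on this nonlinear mapping find the
                 * right file page again through pte_file()/pte_to_pgoff() in
                 * the fault path, instead of the usual linear vm_pgoff
                 * arithmetic.
                 */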
                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page);
                page_cache_release(page);

        spin_unlock(&mm->page_table_lock);

static inline int try_to_unmap_anon(struct page *page)
        struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        spin_lock(&anon_vma->lock);
        BUG_ON(list_empty(&anon_vma->head));
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                ret = try_to_unmap_one(page, vma);
                if (ret == SWAP_FAIL || !page->mapcount)
        spin_unlock(&anon_vma->lock);

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 *
 * The spinlock address_space->i_mmap_lock is tried. If it can't be gotten,
 * return a temporary error.
 */
static inline int try_to_unmap_file(struct page *page)
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma = NULL;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        if (!spin_trylock(&mapping->i_mmap_lock))

        while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
                                        &iter, pgoff, pgoff)) != NULL) {
                ret = try_to_unmap_one(page, vma);
                if (ret == SWAP_FAIL || !page->mapcount)

        if (list_empty(&mapping->i_mmap_nonlinear))

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        if (max_nl_size == 0)   /* all nonlinear vmas are locked or reserved */

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway. Instead
         * just walk the nonlinear vmas, trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page->mapcount;
        page_map_unlock(page);
        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;
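        /*
         * Worked example (illustrative, assuming 128K clusters): if the
         * largest nonlinear vma is 300K, the rounding above gives
         *      (300K + 128K - 1) & ~(128K - 1) = 384K,
         * so the retry loop below sweeps cursor limits of 128K, 256K and
         * 384K at most before giving up on this page.
         */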
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
                cursor = (unsigned long) vma->vm_private_data;
                while (vma->vm_mm->rss &&
                                cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                        ret = try_to_unmap_cluster(
                                        cursor, &mapcount, vma);
                        if (ret == SWAP_FAIL)
                        cursor += CLUSTER_SIZE;
                        vma->vm_private_data = (void *) cursor;
                        if ((int)mapcount <= 0)
                if (ret != SWAP_FAIL)
                        vma->vm_private_data =
                                        (void *) max_nl_cursor;

        cond_resched_lock(&mapping->i_mmap_lock);
        max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas). Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (!(vma->vm_flags & VM_RESERVED))
                        vma->vm_private_data = NULL;

        spin_unlock(&mapping->i_mmap_lock);
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock
 * and its rmap lock. Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN   - we missed a trylock, try again later
 * SWAP_FAIL    - the page is unswappable
 */
int try_to_unmap(struct page *page)
        BUG_ON(PageReserved(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(!page->mapcount);

        if (PageAnon(page))
                ret = try_to_unmap_anon(page);
        else
                ret = try_to_unmap_file(page);

        if (!page->mapcount) {
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
                clear_page_anon(page);
                dec_page_state(nr_mapped);
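/*
 * Hedged sketch of how the pageout path is expected to consume the return
 * values (the real caller is in mm/vmscan.c; the labels are illustrative,
 * not definitions from this file):
 *
 *      switch (try_to_unmap(page)) {
 *      case SWAP_FAIL:
 *              goto activate_locked;   unswappable, keep it active
 *      case SWAP_AGAIN:
 *              goto keep_locked;       missed a trylock, retry later
 *      case SWAP_SUCCESS:
 *              break;                  all ptes gone, write out or free the page
 *      }
 */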