/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */
/*
 * Lock ordering in mm (outermost first):
 *
 * inode->i_mutex (while writing or truncating, not reading or faulting)
 *  inode->i_alloc_sem (vmtruncate_range)
 *   page->flags PG_locked (lock_page)
 *    mapping->i_mmap_lock
 *     mm->page_table_lock or pte_lock
 *      zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *      swap_lock (in swap_duplicate, swap_info_get)
 *       mmlist_lock (in mmput, drain_mmlist and others)
 *       mapping->private_lock (in __set_page_dirty_buffers)
 *       inode_lock (in set_page_dirty's __mark_inode_dirty)
 *        sb_lock (within inode_lock in fs/fs-writeback.c)
 *        mapping->tree_lock (widely used, in set_page_dirty,
 *                  in arch-dependent flush_dcache_mmap_lock,
 *                  within inode_lock in __sync_single_inode)
 *
 * (An illustrative sketch of this nesting, not from the original file,
 * follows the #includes below.)
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/vs_memory.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;
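
/*
 * Illustrative sketch (not part of the original rmap.c): the nesting that
 * the lock ordering comment above implies when a file page is examined
 * through its rmap.  The function name and parameters are hypothetical;
 * the real callers are functions like page_referenced_file() and
 * try_to_unmap_file() below.
 */
static inline void example_rmap_lock_nesting(struct page *page,
					     struct address_space *mapping,
					     struct mm_struct *mm, pmd_t *pmd,
					     unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	lock_page(page);			/* page->flags PG_locked first */
	spin_lock(&mapping->i_mmap_lock);	/* then mapping->i_mmap_lock */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl); /* then the pte lock */

	/* ... examine or modify the pte here ... */

	pte_unmap_unlock(pte, ptl);		/* release in reverse order */
	spin_unlock(&mapping->i_mmap_lock);
	unlock_page(page);
}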
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}
void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		address = -EFAULT;
	}
	return address;
}
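
/*
 * Worked example (not from the original source): with 4K pages, a file
 * vma with vm_start = 0x08000000 and vm_pgoff = 0x10 maps the file from
 * page offset 0x10 onward.  A page with page->index = 0x13 is then
 * expected at
 *
 *	address = 0x08000000 + ((0x13 - 0x10) << 12) = 0x08003000
 *
 * and an address falling outside [vm_start, vm_end) means the page is
 * not mapped by this vma, so -EFAULT is returned instead.
 */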
/*
 * At what user virtual address is page expected in vma? Checks that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}
static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
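
/*
 * Hedged usage sketch (not part of the original file): how a reclaim
 * path might consult page_referenced() when deciding whether a locked
 * page is a good eviction candidate.  example_page_is_idle() is a
 * hypothetical helper, not a real kernel function.
 */
static inline int example_page_is_idle(struct page *page)
{
	/* Caller holds the page lock, hence is_locked == 1. */
	return page_referenced(page, 1) == 0;
}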
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}
	if (page_test_and_clear_dirty(page))
		ret = 1;

	return ret;
}
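
/*
 * Hedged usage sketch (not part of the original file): page_mkclean() is
 * the kind of call a dirty-accounting or writeback-preparation path makes
 * to write-protect every pte mapping a shared file page, so that the next
 * write faults and can be noticed.  The helper below is hypothetical.
 */
static inline int example_writeprotect_for_writeback(struct page *page)
{
	BUG_ON(!PageLocked(page));
	/* Nonzero means at least one pte (or the storage key) was dirty. */
	return page_mkclean(page);
}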
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
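
/*
 * Hedged sketch (not part of the original file) of the write side of an
 * anonymous fault, loosely modelled on do_anonymous_page(): a brand new
 * page has its pte installed and is then told about that first and only
 * mapping with page_add_new_anon_rmap().  The helper name is hypothetical,
 * and error handling and pte locking are omitted.
 */
static inline void example_map_new_anon_page(struct mm_struct *mm,
					     struct vm_area_struct *vma,
					     unsigned long address,
					     pte_t *page_table,
					     struct page *page)
{
	pte_t entry;

	entry = mk_pte(page, vma->vm_page_prot);
	entry = pte_mkwrite(pte_mkdirty(entry));

	inc_mm_counter(mm, anon_rss);
	lru_cache_add_active(page);			/* make it visible to reclaim */
	page_add_new_anon_rmap(page, vma, address);	/* mapcount: -1 -> 0 */
	set_pte_at(mm, address, page_table, entry);
}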
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG " page->flags = %lx\n", page->flags);
			printk (KERN_EMERG " page->count = %x\n", page_count(page));
			printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops)
				print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
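
/*
 * Worked example (not from the original source), assuming 4K pages and a
 * 2M PMD: CLUSTER_SIZE = min(32 * 4K, 2M) = 128K and CLUSTER_MASK = ~0x1ffff.
 * A cursor of 0x23456 into a nonlinear vma therefore scans the aligned window
 *
 *	address = (vm_start + 0x23456) & CLUSTER_MASK
 *	end     = address + 128K
 *
 * and the cursor is then advanced by CLUSTER_SIZE, so successive reclaim
 * passes sweep the vma's virtual address space one window at a time.
 */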
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}
static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
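
/*
 * Hedged usage sketch (not part of the original file): how a pageout path
 * typically reacts to the return codes documented above.  The helper name
 * and error codes chosen here are hypothetical; the real caller is the
 * vmscan code.
 */
static inline int example_pageout_unmap(struct page *page)
{
	if (!page_mapped(page))
		return 0;

	switch (try_to_unmap(page, 0)) {
	case SWAP_FAIL:
		return -EBUSY;		/* unswappable: keep the page active */
	case SWAP_AGAIN:
		return -EAGAIN;		/* missed a mapping: try again later */
	case SWAP_SUCCESS:
		break;			/* all ptes gone: safe to write out or free */
	}
	return 0;
}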