X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Frmap.c;h=bca42f9d298bd8f9decf04e5c08868b0988a3fa6;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=5577805ae992b6b60533441a5cb89bdad7327007;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/mm/rmap.c b/mm/rmap.c index 5577805ae..bca42f9d2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -4,22 +4,46 @@ * Copyright 2001, Rik van Riel * Released under the General Public License (GPL). * + * Simple, low overhead reverse mapping scheme. + * Please try to keep this thing as modular as possible. * - * Simple, low overhead pte-based reverse mapping scheme. - * This is kept modular because we may want to experiment - * with object-based reverse mapping schemes. Please try - * to keep this thing as modular as possible. + * Provides methods for unmapping each kind of mapped page: + * the anon methods track anonymous pages, and + * the file methods track pages belonging to an inode. + * + * Original design by Rik van Riel 2001 + * File methods by Dave McCracken 2003, 2004 + * Anonymous methods by Andrea Arcangeli 2004 + * Contributions by Hugh Dickins 2003, 2004 */ /* - * Locking: - * - the page->pte.chain is protected by the PG_maplock bit, - * which nests within the the mm->page_table_lock, - * which nests within the page lock. - * - because swapout locking is opposite to the locking order - * in the page fault path, the swapout path uses trylocks - * on the mm->page_table_lock + * Lock ordering in mm: + * + * inode->i_mutex (while writing or truncating, not reading or faulting) + * inode->i_alloc_sem + * + * When a page fault occurs in writing from user to file, down_read + * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within + * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never + * taken together; in truncation, i_mutex is taken outermost. + * + * mm->mmap_sem + * page->flags PG_locked (lock_page) + * mapping->i_mmap_lock + * anon_vma->lock + * mm->page_table_lock or pte_lock + * zone->lru_lock (in mark_page_accessed, isolate_lru_page) + * swap_lock (in swap_duplicate, swap_info_get) + * mmlist_lock (in mmput, drain_mmlist and others) + * mapping->private_lock (in __set_page_dirty_buffers) + * inode_lock (in set_page_dirty's __mark_inode_dirty) + * sb_lock (within inode_lock in fs/fs-writeback.c) + * mapping->tree_lock (widely used, in set_page_dirty, + * in arch-dependent flush_dcache_mmap_lock, + * within inode_lock in __sync_single_inode) */ + #include #include #include @@ -27,106 +51,367 @@ #include #include #include -#include -#include +#include +#include +#include +#include -#include -#include -#include #include -/* - * Something oopsable to put for now in the page->mapping - * of an anonymous page, to test that it is ignored. - */ -#define ANON_MAPPING_DEBUG ((struct address_space *) 0) +struct kmem_cache *anon_vma_cachep; -static inline void clear_page_anon(struct page *page) +static inline void validate_anon_vma(struct vm_area_struct *find_vma) { - BUG_ON(page->mapping != ANON_MAPPING_DEBUG); - page->mapping = NULL; - ClearPageAnon(page); +#ifdef CONFIG_DEBUG_VM + struct anon_vma *anon_vma = find_vma->anon_vma; + struct vm_area_struct *vma; + unsigned int mapcount = 0; + int found = 0; + + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + mapcount++; + BUG_ON(mapcount > 100000); + if (vma == find_vma) + found = 1; + } + BUG_ON(!found); +#endif } -/* - * Shared pages have a chain of pte_chain structures, used to locate - * all the mappings to this page. 
We only need a pointer to the pte - * here, the page struct for the page table page contains the process - * it belongs to and the offset within that process. - * - * We use an array of pte pointers in this structure to minimise cache misses - * while traversing reverse maps. - */ -#define NRPTE ((L1_CACHE_BYTES - sizeof(unsigned long))/sizeof(pte_addr_t)) +/* This must be called under the mmap_sem. */ +int anon_vma_prepare(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = vma->anon_vma; + + might_sleep(); + if (unlikely(!anon_vma)) { + struct mm_struct *mm = vma->vm_mm; + struct anon_vma *allocated, *locked; + + anon_vma = find_mergeable_anon_vma(vma); + if (anon_vma) { + allocated = NULL; + locked = anon_vma; + spin_lock(&locked->lock); + } else { + anon_vma = anon_vma_alloc(); + if (unlikely(!anon_vma)) + return -ENOMEM; + allocated = anon_vma; + locked = NULL; + } -/* - * next_and_idx encodes both the address of the next pte_chain and the - * offset of the lowest-index used pte in ptes[] (which is equal also - * to the offset of the highest-index unused pte in ptes[], plus one). - */ -struct pte_chain { - unsigned long next_and_idx; - pte_addr_t ptes[NRPTE]; -} ____cacheline_aligned; + /* page_table_lock to protect against threads */ + spin_lock(&mm->page_table_lock); + if (likely(!vma->anon_vma)) { + vma->anon_vma = anon_vma; + list_add_tail(&vma->anon_vma_node, &anon_vma->head); + allocated = NULL; + } + spin_unlock(&mm->page_table_lock); + + if (locked) + spin_unlock(&locked->lock); + if (unlikely(allocated)) + anon_vma_free(allocated); + } + return 0; +} + +void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) +{ + BUG_ON(vma->anon_vma != next->anon_vma); + list_del(&next->anon_vma_node); +} + +void __anon_vma_link(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = vma->anon_vma; + + if (anon_vma) { + list_add_tail(&vma->anon_vma_node, &anon_vma->head); + validate_anon_vma(vma); + } +} + +void anon_vma_link(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = vma->anon_vma; + + if (anon_vma) { + spin_lock(&anon_vma->lock); + list_add_tail(&vma->anon_vma_node, &anon_vma->head); + validate_anon_vma(vma); + spin_unlock(&anon_vma->lock); + } +} + +void anon_vma_unlink(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = vma->anon_vma; + int empty; + + if (!anon_vma) + return; -kmem_cache_t *pte_chain_cache; + spin_lock(&anon_vma->lock); + validate_anon_vma(vma); + list_del(&vma->anon_vma_node); -static inline struct pte_chain *pte_chain_next(struct pte_chain *pte_chain) + /* We must garbage collect the anon_vma if it's empty */ + empty = list_empty(&anon_vma->head); + spin_unlock(&anon_vma->lock); + + if (empty) + anon_vma_free(anon_vma); +} + +static void anon_vma_ctor(void *data, struct kmem_cache *cachep, + unsigned long flags) { - return (struct pte_chain *)(pte_chain->next_and_idx & ~NRPTE); + if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == + SLAB_CTOR_CONSTRUCTOR) { + struct anon_vma *anon_vma = data; + + spin_lock_init(&anon_vma->lock); + INIT_LIST_HEAD(&anon_vma->head); + } } -static inline struct pte_chain *pte_chain_ptr(unsigned long pte_chain_addr) +void __init anon_vma_init(void) { - return (struct pte_chain *)(pte_chain_addr & ~NRPTE); + anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), + 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL); } -static inline int pte_chain_idx(struct pte_chain *pte_chain) +/* + * Getting a lock on a stable anon_vma from a page off the LRU is + 
* tricky: page_lock_anon_vma rely on RCU to guard against the races. + */ +static struct anon_vma *page_lock_anon_vma(struct page *page) { - return pte_chain->next_and_idx & NRPTE; + struct anon_vma *anon_vma = NULL; + unsigned long anon_mapping; + + rcu_read_lock(); + anon_mapping = (unsigned long) page->mapping; + if (!(anon_mapping & PAGE_MAPPING_ANON)) + goto out; + if (!page_mapped(page)) + goto out; + + anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); + spin_lock(&anon_vma->lock); +out: + rcu_read_unlock(); + return anon_vma; } +/* + * At what user virtual address is page expected in vma? + */ static inline unsigned long -pte_chain_encode(struct pte_chain *pte_chain, int idx) +vma_address(struct page *page, struct vm_area_struct *vma) +{ + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + unsigned long address; + + address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { + /* page should be within any vma from prio_tree_next */ + BUG_ON(!PageAnon(page)); + return -EFAULT; + } + return address; +} + +/* + * At what user virtual address is page expected in vma? checking that the + * page matches the vma: currently only used on anon pages, by unuse_vma; + */ +unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { - return (unsigned long)pte_chain | idx; + if (PageAnon(page)) { + if ((void *)vma->anon_vma != + (void *)page->mapping - PAGE_MAPPING_ANON) + return -EFAULT; + } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { + if (!vma->vm_file || + vma->vm_file->f_mapping != page->mapping) + return -EFAULT; + } else + return -EFAULT; + return vma_address(page, vma); } /* - * pte_chain list management policy: + * Check that @page is mapped at @address into @mm. * - * - If a page has a pte_chain list then it is shared by at least two processes, - * because a single sharing uses PageDirect. (Well, this isn't true yet, - * coz this code doesn't collapse singletons back to PageDirect on the remove - * path). - * - A pte_chain list has free space only in the head member - all succeeding - * members are 100% full. - * - If the head element has free space, it occurs in its leading slots. - * - All free space in the pte_chain is at the start of the head member. - * - Insertion into the pte_chain puts a pte pointer in the last free slot of - * the head member. - * - Removal from a pte chain moves the head pte of the head member onto the - * victim pte and frees the head member if it became empty. + * On success returns with pte mapped and locked. + */ +pte_t *page_check_address(struct page *page, struct mm_struct *mm, + unsigned long address, spinlock_t **ptlp) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return NULL; + + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return NULL; + + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return NULL; + + pte = pte_offset_map(pmd, address); + /* Make a quick check before getting the lock */ + if (!pte_present(*pte)) { + pte_unmap(pte); + return NULL; + } + + ptl = pte_lockptr(mm, pmd); + spin_lock(ptl); + if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { + *ptlp = ptl; + return pte; + } + pte_unmap_unlock(pte, ptl); + return NULL; +} + +/* + * Subfunctions of page_referenced: page_referenced_one called + * repeatedly from either page_referenced_anon or page_referenced_file. 
*/ +static int page_referenced_one(struct page *page, + struct vm_area_struct *vma, unsigned int *mapcount) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long address; + pte_t *pte; + spinlock_t *ptl; + int referenced = 0; + + address = vma_address(page, vma); + if (address == -EFAULT) + goto out; + + pte = page_check_address(page, mm, address, &ptl); + if (!pte) + goto out; + + if (ptep_clear_flush_young(vma, address, pte)) + referenced++; + + /* Pretend the page is referenced if the task has the + swap token and is in the middle of a page fault. */ + if (mm != current->mm && has_swap_token(mm) && + rwsem_is_locked(&mm->mmap_sem)) + referenced++; + + (*mapcount)--; + pte_unmap_unlock(pte, ptl); +out: + return referenced; +} + +static int page_referenced_anon(struct page *page) +{ + unsigned int mapcount; + struct anon_vma *anon_vma; + struct vm_area_struct *vma; + int referenced = 0; + + anon_vma = page_lock_anon_vma(page); + if (!anon_vma) + return referenced; + + mapcount = page_mapcount(page); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + referenced += page_referenced_one(page, vma, &mapcount); + if (!mapcount) + break; + } + spin_unlock(&anon_vma->lock); + return referenced; +} /** - ** VM stuff below this comment - **/ + * page_referenced_file - referenced check for object-based rmap + * @page: the page we're checking references on. + * + * For an object-based mapped page, find all the places it is mapped and + * check/clear the referenced flag. This is done by following the page->mapping + * pointer, then walking the chain of vmas it holds. It returns the number + * of references it found. + * + * This function is only called from page_referenced for object-based pages. + */ +static int page_referenced_file(struct page *page) +{ + unsigned int mapcount; + struct address_space *mapping = page->mapping; + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct vm_area_struct *vma; + struct prio_tree_iter iter; + int referenced = 0; + + /* + * The caller's checks on page->mapping and !PageAnon have made + * sure that this is a file page: the check for page->mapping + * excludes the case just before it gets set on an anon page. + */ + BUG_ON(PageAnon(page)); + + /* + * The page lock not only makes sure that page->mapping cannot + * suddenly be NULLified by truncation, it makes sure that the + * structure at mapping cannot be freed and reused yet, + * so we can safely take mapping->i_mmap_lock. + */ + BUG_ON(!PageLocked(page)); + + spin_lock(&mapping->i_mmap_lock); + + /* + * i_mmap_lock does not stabilize mapcount at all, but mapcount + * is more likely to be accurate if we note it after spinning. + */ + mapcount = page_mapcount(page); + + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) + == (VM_LOCKED|VM_MAYSHARE)) { + referenced++; + break; + } + referenced += page_referenced_one(page, vma, &mapcount); + if (!mapcount) + break; + } + + spin_unlock(&mapping->i_mmap_lock); + return referenced; +} /** * page_referenced - test if the page was referenced * @page: the page to test + * @is_locked: caller holds lock on the page * * Quick test_and_clear_referenced for all mappings to a page, - * returns the number of processes which referenced the page. - * Caller needs to hold the rmap lock. - * - * If the page has a single-entry pte_chain, collapse that back to a PageDirect - * representation. This way, it's only done under memory pressure. 
+ * returns the number of ptes which referenced the page. */ -int fastcall page_referenced(struct page * page) +int page_referenced(struct page *page, int is_locked) { - struct pte_chain *pc; int referenced = 0; if (page_test_and_clear_young(page)) @@ -135,410 +420,512 @@ int fastcall page_referenced(struct page * page) if (TestClearPageReferenced(page)) referenced++; - if (PageDirect(page)) { - pte_t *pte = rmap_ptep_map(page->pte.direct); - if (ptep_test_and_clear_young(pte)) + if (page_mapped(page) && page->mapping) { + if (PageAnon(page)) + referenced += page_referenced_anon(page); + else if (is_locked) + referenced += page_referenced_file(page); + else if (TestSetPageLocked(page)) referenced++; - rmap_ptep_unmap(pte); - } else { - int nr_chains = 0; - - /* Check all the page tables mapping this page. */ - for (pc = page->pte.chain; pc; pc = pte_chain_next(pc)) { - int i; - - for (i = pte_chain_idx(pc); i < NRPTE; i++) { - pte_addr_t pte_paddr = pc->ptes[i]; - pte_t *p; - - p = rmap_ptep_map(pte_paddr); - if (ptep_test_and_clear_young(p)) - referenced++; - rmap_ptep_unmap(p); - nr_chains++; - } - } - if (nr_chains == 1) { - pc = page->pte.chain; - page->pte.direct = pc->ptes[NRPTE-1]; - SetPageDirect(page); - pc->ptes[NRPTE-1] = 0; - __pte_chain_free(pc); + else { + if (page->mapping) + referenced += page_referenced_file(page); + unlock_page(page); } } return referenced; } -/** - * page_add_rmap - add reverse mapping entry to a page - * @page: the page to add the mapping to - * @ptep: the page table entry mapping this page - * - * Add a new pte reverse mapping to a page. - * The caller needs to hold the mm->page_table_lock. - */ -struct pte_chain * fastcall -page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain) +static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) { - pte_addr_t pte_paddr = ptep_to_paddr(ptep); - struct pte_chain *cur_pte_chain; + struct mm_struct *mm = vma->vm_mm; + unsigned long address; + pte_t *pte, entry; + spinlock_t *ptl; + int ret = 0; + + address = vma_address(page, vma); + if (address == -EFAULT) + goto out; - if (PageReserved(page)) - return pte_chain; + pte = page_check_address(page, mm, address, &ptl); + if (!pte) + goto out; - rmap_lock(page); + if (!pte_dirty(*pte) && !pte_write(*pte)) + goto unlock; - if (page->pte.direct == 0) { - page->pte.direct = pte_paddr; - SetPageDirect(page); - if (!page->mapping) { - SetPageAnon(page); - page->mapping = ANON_MAPPING_DEBUG; - } - inc_page_state(nr_mapped); - goto out; - } + entry = ptep_get_and_clear(mm, address, pte); + entry = pte_mkclean(entry); + entry = pte_wrprotect(entry); + ptep_establish(vma, address, pte, entry); + lazy_mmu_prot_update(entry); + ret = 1; - if (PageDirect(page)) { - /* Convert a direct pointer into a pte_chain */ - ClearPageDirect(page); - pte_chain->ptes[NRPTE-1] = page->pte.direct; - pte_chain->ptes[NRPTE-2] = pte_paddr; - pte_chain->next_and_idx = pte_chain_encode(NULL, NRPTE-2); - page->pte.direct = 0; - page->pte.chain = pte_chain; - pte_chain = NULL; /* We consumed it */ - goto out; +unlock: + pte_unmap_unlock(pte, ptl); +out: + return ret; +} + +static int page_mkclean_file(struct address_space *mapping, struct page *page) +{ + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct vm_area_struct *vma; + struct prio_tree_iter iter; + int ret = 0; + + BUG_ON(PageAnon(page)); + + spin_lock(&mapping->i_mmap_lock); + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + if (vma->vm_flags & VM_SHARED) + ret 
+= page_mkclean_one(page, vma); } + spin_unlock(&mapping->i_mmap_lock); + return ret; +} - cur_pte_chain = page->pte.chain; - if (cur_pte_chain->ptes[0]) { /* It's full */ - pte_chain->next_and_idx = pte_chain_encode(cur_pte_chain, - NRPTE - 1); - page->pte.chain = pte_chain; - pte_chain->ptes[NRPTE-1] = pte_paddr; - pte_chain = NULL; /* We consumed it */ - goto out; +int page_mkclean(struct page *page) +{ + int ret = 0; + + WARN_ON_ONCE(!PageLocked(page)); + + if (page_mapped(page)) { + struct address_space *mapping = page_mapping(page); + if (mapping) + ret = page_mkclean_file(mapping, page); } - cur_pte_chain->ptes[pte_chain_idx(cur_pte_chain) - 1] = pte_paddr; - cur_pte_chain->next_and_idx--; -out: - rmap_unlock(page); - return pte_chain; + + return ret; } /** - * page_remove_rmap - take down reverse mapping to a page - * @page: page to remove mapping from - * @ptep: page table entry to remove - * - * Removes the reverse mapping from the pte_chain of the page, - * after that the caller can clear the page table entry and free - * the page. - * Caller needs to hold the mm->page_table_lock. + * page_set_anon_rmap - setup new anonymous rmap + * @page: the page to add the mapping to + * @vma: the vm area in which the mapping is added + * @address: the user virtual address mapped */ -void fastcall page_remove_rmap(struct page *page, pte_t *ptep) +static void __page_set_anon_rmap(struct page *page, + struct vm_area_struct *vma, unsigned long address) { - pte_addr_t pte_paddr = ptep_to_paddr(ptep); - struct pte_chain *pc; + struct anon_vma *anon_vma = vma->anon_vma; - if (!pfn_valid(page_to_pfn(page)) || PageReserved(page)) - return; + BUG_ON(!anon_vma); + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; + page->mapping = (struct address_space *) anon_vma; - rmap_lock(page); + page->index = linear_page_index(vma, address); - if (!page_mapped(page)) - goto out_unlock; /* remap_page_range() from a driver? */ + /* + * nr_mapped state can be updated without turning off + * interrupts because it is not modified via interrupt. + */ + __inc_zone_page_state(page, NR_ANON_PAGES); +} - if (PageDirect(page)) { - if (page->pte.direct == pte_paddr) { - page->pte.direct = 0; - ClearPageDirect(page); - goto out; - } - } else { - struct pte_chain *start = page->pte.chain; - struct pte_chain *next; - int victim_i = pte_chain_idx(start); - - for (pc = start; pc; pc = next) { - int i; - - next = pte_chain_next(pc); - if (next) - prefetch(next); - for (i = pte_chain_idx(pc); i < NRPTE; i++) { - pte_addr_t pa = pc->ptes[i]; - - if (pa != pte_paddr) - continue; - pc->ptes[i] = start->ptes[victim_i]; - start->ptes[victim_i] = 0; - if (victim_i == NRPTE-1) { - /* Emptied a pte_chain */ - page->pte.chain = pte_chain_next(start); - __pte_chain_free(start); - } else { - start->next_and_idx++; - } - goto out; - } +/** + * page_add_anon_rmap - add pte mapping to an anonymous page + * @page: the page to add the mapping to + * @vma: the vm area in which the mapping is added + * @address: the user virtual address mapped + * + * The caller needs to hold the pte lock. 
+ */ +void page_add_anon_rmap(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + if (atomic_inc_and_test(&page->_mapcount)) + __page_set_anon_rmap(page, vma, address); + /* else checking page index and mapping is racy */ +} + +/* + * page_add_new_anon_rmap - add pte mapping to a new anonymous page + * @page: the page to add the mapping to + * @vma: the vm area in which the mapping is added + * @address: the user virtual address mapped + * + * Same as page_add_anon_rmap but must only be called on *new* pages. + * This means the inc-and-test can be bypassed. + */ +void page_add_new_anon_rmap(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ + __page_set_anon_rmap(page, vma, address); +} + +/** + * page_add_file_rmap - add pte mapping to a file page + * @page: the page to add the mapping to + * + * The caller needs to hold the pte lock. + */ +void page_add_file_rmap(struct page *page) +{ + if (atomic_inc_and_test(&page->_mapcount)) + __inc_zone_page_state(page, NR_FILE_MAPPED); +} + +/** + * page_remove_rmap - take down pte mapping from a page + * @page: page to remove mapping from + * + * The caller needs to hold the pte lock. + */ +void page_remove_rmap(struct page *page, struct vm_area_struct *vma) +{ + if (atomic_add_negative(-1, &page->_mapcount)) { + if (unlikely(page_mapcount(page) < 0)) { + printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page)); + printk (KERN_EMERG " page->flags = %lx\n", page->flags); + printk (KERN_EMERG " page->count = %x\n", page_count(page)); + printk (KERN_EMERG " page->mapping = %p\n", page->mapping); + if (vma->vm_ops) + print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage); + BUG(); } - } -out: - if (!page_mapped(page)) { + /* + * It would be tidy to reset the PageAnon mapping here, + * but that might overwrite a racing page_add_anon_rmap + * which increments mapcount after us but sets mapping + * before us: so leave the reset to free_hot_cold_page, + * and remember that it's only reliable while mapped. + * Leaving it set also helps swapoff to reinstate ptes + * faster for those pages still in swapcache. + */ if (page_test_and_clear_dirty(page)) set_page_dirty(page); - if (PageAnon(page)) - clear_page_anon(page); - dec_page_state(nr_mapped); + __dec_zone_page_state(page, + PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); } -out_unlock: - rmap_unlock(page); } -/** - * try_to_unmap_one - worker function for try_to_unmap - * @page: page to unmap - * @ptep: page table entry to unmap from page - * - * Internal helper function for try_to_unmap, called for each page - * table entry mapping a page. Because locking order here is opposite - * to the locking order used by the page fault path, we use trylocks. - * Locking: - * page lock shrink_list(), trylock - * rmap lock shrink_list() - * mm->page_table_lock try_to_unmap_one(), trylock +/* + * Subfunctions of try_to_unmap: try_to_unmap_one called + * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 
*/ -static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr) +static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, + int migration) { - pte_t *ptep = rmap_ptep_map(paddr); - unsigned long address = ptep_to_address(ptep); - struct mm_struct * mm = ptep_to_mm(ptep); - struct vm_area_struct * vma; - pte_t pte; - int ret; + struct mm_struct *mm = vma->vm_mm; + unsigned long address; + pte_t *pte; + pte_t pteval; + spinlock_t *ptl; + int ret = SWAP_AGAIN; + + address = vma_address(page, vma); + if (address == -EFAULT) + goto out; - if (!mm) - BUG(); + pte = page_check_address(page, mm, address, &ptl); + if (!pte) + goto out; /* - * We need the page_table_lock to protect us from page faults, - * munmap, fork, etc... + * If the page is mlock()d, we cannot swap it out. + * If it's recently referenced (perhaps page_referenced + * skipped over this mm) then we should reactivate it. */ - if (!spin_trylock(&mm->page_table_lock)) { - rmap_ptep_unmap(ptep); - return SWAP_AGAIN; - } - - /* unmap_vmas drops page_table_lock with vma unlinked */ - vma = find_vma(mm, address); - if (!vma) { - ret = SWAP_FAIL; - goto out_unlock; - } - - /* The page is mlock()d, we cannot swap it out. */ - if (vma->vm_flags & VM_LOCKED) { + if (!migration && ((vma->vm_flags & VM_LOCKED) || + (ptep_clear_flush_young(vma, address, pte)))) { ret = SWAP_FAIL; - goto out_unlock; + goto out_unmap; } /* Nuke the page table entry. */ - flush_cache_page(vma, address); - pte = ptep_clear_flush(vma, address, ptep); - - if (PageAnon(page)) { - swp_entry_t entry = { .val = page->private }; - /* - * Store the swap location in the pte. - * See handle_pte_fault() ... - */ - BUG_ON(!PageSwapCache(page)); - swap_duplicate(entry); - set_pte(ptep, swp_entry_to_pte(entry)); - BUG_ON(pte_file(*ptep)); - } else { - /* - * If a nonlinear mapping then store the file page offset - * in the pte. - */ - BUG_ON(!page->mapping); - if (page->index != linear_page_index(vma, address)) { - set_pte(ptep, pgoff_to_pte(page->index)); - BUG_ON(!pte_file(*ptep)); - } - } + flush_cache_page(vma, address, page_to_pfn(page)); + pteval = ptep_clear_flush(vma, address, pte); /* Move the dirty bit to the physical page now the pte is gone. */ - if (pte_dirty(pte)) + if (pte_dirty(pteval)) set_page_dirty(page); - mm->rss--; + /* Update high watermark before we lower rss */ + update_hiwater_rss(mm); + + if (PageAnon(page)) { + swp_entry_t entry = { .val = page_private(page) }; + + if (PageSwapCache(page)) { + /* + * Store the swap location in the pte. + * See handle_pte_fault() ... + */ + swap_duplicate(entry); + if (list_empty(&mm->mmlist)) { + spin_lock(&mmlist_lock); + if (list_empty(&mm->mmlist)) + list_add(&mm->mmlist, &init_mm.mmlist); + spin_unlock(&mmlist_lock); + } + dec_mm_counter(mm, anon_rss); +#ifdef CONFIG_MIGRATION + } else { + /* + * Store the pfn of the page in a special migration + * pte. do_swap_page() will wait until the migration + * pte is removed and then restart fault handling. 
+ */ + BUG_ON(!migration); + entry = make_migration_entry(page, pte_write(pteval)); +#endif + } + set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); + BUG_ON(pte_file(*pte)); + } else +#ifdef CONFIG_MIGRATION + if (migration) { + /* Establish migration entry for a file page */ + swp_entry_t entry; + entry = make_migration_entry(page, pte_write(pteval)); + set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); + } else +#endif + dec_mm_counter(mm, file_rss); + + + page_remove_rmap(page, vma); page_cache_release(page); - ret = SWAP_SUCCESS; -out_unlock: - rmap_ptep_unmap(ptep); - spin_unlock(&mm->page_table_lock); +out_unmap: + pte_unmap_unlock(pte, ptl); +out: return ret; } -/** - * try_to_unmap - try to remove all page table mappings to a page - * @page: the page to get unmapped +/* + * objrmap doesn't work for nonlinear VMAs because the assumption that + * offset-into-file correlates with offset-into-virtual-addresses does not hold. + * Consequently, given a particular page and its ->index, we cannot locate the + * ptes which are mapping that page without an exhaustive linear search. * - * Tries to remove all the page table entries which are mapping this - * page, used in the pageout path. Caller must hold the page lock - * and its rmap lock. Return values are: + * So what this code does is a mini "virtual scan" of each nonlinear VMA which + * maps the file to which the target page belongs. The ->vm_private_data field + * holds the current cursor into that scan. Successive searches will circulate + * around the vma's virtual address space. * - * SWAP_SUCCESS - we succeeded in removing all mappings - * SWAP_AGAIN - we missed a trylock, try again later - * SWAP_FAIL - the page is unswappable + * So as more replacement pressure is applied to the pages in a nonlinear VMA, + * more scanning pressure is placed against them as well. Eventually pages + * will become fully unmapped and are eligible for eviction. + * + * For very sparsely populated VMAs this is a little inefficient - chances are + * there there won't be many ptes located within the scan cluster. In this case + * maybe we could scan further - to the end of the pte page, perhaps. */ -int fastcall try_to_unmap(struct page * page) +#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) +#define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) + +static void try_to_unmap_cluster(unsigned long cursor, + unsigned int *mapcount, struct vm_area_struct *vma) { - struct pte_chain *pc, *next_pc, *start; - int ret = SWAP_SUCCESS; - int victim_i; - - /* This page should not be on the pageout lists. 
*/ - if (PageReserved(page)) - BUG(); - if (!PageLocked(page)) - BUG(); - - if (PageDirect(page)) { - ret = try_to_unmap_one(page, page->pte.direct); - if (ret == SWAP_SUCCESS) { - page->pte.direct = 0; - ClearPageDirect(page); - } - goto out; - } + struct mm_struct *mm = vma->vm_mm; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + pte_t pteval; + spinlock_t *ptl; + struct page *page; + unsigned long address; + unsigned long end; + + address = (vma->vm_start + cursor) & CLUSTER_MASK; + end = address + CLUSTER_SIZE; + if (address < vma->vm_start) + address = vma->vm_start; + if (end > vma->vm_end) + end = vma->vm_end; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return; - start = page->pte.chain; - victim_i = pte_chain_idx(start); - for (pc = start; pc; pc = next_pc) { - int i; - - next_pc = pte_chain_next(pc); - if (next_pc) - prefetch(next_pc); - for (i = pte_chain_idx(pc); i < NRPTE; i++) { - pte_addr_t pte_paddr = pc->ptes[i]; - - switch (try_to_unmap_one(page, pte_paddr)) { - case SWAP_SUCCESS: - /* - * Release a slot. If we're releasing the - * first pte in the first pte_chain then - * pc->ptes[i] and start->ptes[victim_i] both - * refer to the same thing. It works out. - */ - pc->ptes[i] = start->ptes[victim_i]; - start->ptes[victim_i] = 0; - victim_i++; - if (victim_i == NRPTE) { - page->pte.chain = pte_chain_next(start); - __pte_chain_free(start); - start = page->pte.chain; - victim_i = 0; - } else { - start->next_and_idx++; - } - break; - case SWAP_AGAIN: - /* Skip this pte, remembering status. */ - ret = SWAP_AGAIN; - continue; - case SWAP_FAIL: - ret = SWAP_FAIL; - goto out; - } - } - } -out: - if (!page_mapped(page)) { - if (page_test_and_clear_dirty(page)) + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return; + + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + + /* Update high watermark before we lower rss */ + update_hiwater_rss(mm); + + for (; address < end; pte++, address += PAGE_SIZE) { + if (!pte_present(*pte)) + continue; + page = vm_normal_page(vma, address, *pte); + BUG_ON(!page || PageAnon(page)); + + if (ptep_clear_flush_young(vma, address, pte)) + continue; + + /* Nuke the page table entry. */ + flush_cache_page(vma, address, pte_pfn(*pte)); + pteval = ptep_clear_flush(vma, address, pte); + + /* If nonlinear, store the file page offset in the pte. */ + if (page->index != linear_page_index(vma, address)) + set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); + + /* Move the dirty bit to the physical page now the pte is gone. */ + if (pte_dirty(pteval)) set_page_dirty(page); - if (PageAnon(page)) - clear_page_anon(page); - dec_page_state(nr_mapped); - ret = SWAP_SUCCESS; + + page_remove_rmap(page, vma); + page_cache_release(page); + dec_mm_counter(mm, file_rss); + (*mapcount)--; } - return ret; + pte_unmap_unlock(pte - 1, ptl); } -/** - ** No more VM stuff below this comment, only pte_chain helper - ** functions. 
- **/ - -static void pte_chain_ctor(void *p, kmem_cache_t *cachep, unsigned long flags) +static int try_to_unmap_anon(struct page *page, int migration) { - struct pte_chain *pc = p; - - memset(pc, 0, sizeof(*pc)); + struct anon_vma *anon_vma; + struct vm_area_struct *vma; + int ret = SWAP_AGAIN; + + anon_vma = page_lock_anon_vma(page); + if (!anon_vma) + return ret; + + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + ret = try_to_unmap_one(page, vma, migration); + if (ret == SWAP_FAIL || !page_mapped(page)) + break; + } + spin_unlock(&anon_vma->lock); + return ret; } -DEFINE_PER_CPU(struct pte_chain *, local_pte_chain) = 0; - /** - * __pte_chain_free - free pte_chain structure - * @pte_chain: pte_chain struct to free + * try_to_unmap_file - unmap file page using the object-based rmap method + * @page: the page to unmap + * + * Find all the mappings of a page using the mapping pointer and the vma chains + * contained in the address_space struct it points to. + * + * This function is only called from try_to_unmap for object-based pages. */ -void __pte_chain_free(struct pte_chain *pte_chain) +static int try_to_unmap_file(struct page *page, int migration) { - struct pte_chain **pte_chainp; - - pte_chainp = &get_cpu_var(local_pte_chain); - if (pte_chain->next_and_idx) - pte_chain->next_and_idx = 0; - if (*pte_chainp) - kmem_cache_free(pte_chain_cache, *pte_chainp); - *pte_chainp = pte_chain; - put_cpu_var(local_pte_chain); + struct address_space *mapping = page->mapping; + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct vm_area_struct *vma; + struct prio_tree_iter iter; + int ret = SWAP_AGAIN; + unsigned long cursor; + unsigned long max_nl_cursor = 0; + unsigned long max_nl_size = 0; + unsigned int mapcount; + + spin_lock(&mapping->i_mmap_lock); + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + ret = try_to_unmap_one(page, vma, migration); + if (ret == SWAP_FAIL || !page_mapped(page)) + goto out; + } + + if (list_empty(&mapping->i_mmap_nonlinear)) + goto out; + + list_for_each_entry(vma, &mapping->i_mmap_nonlinear, + shared.vm_set.list) { + if ((vma->vm_flags & VM_LOCKED) && !migration) + continue; + cursor = (unsigned long) vma->vm_private_data; + if (cursor > max_nl_cursor) + max_nl_cursor = cursor; + cursor = vma->vm_end - vma->vm_start; + if (cursor > max_nl_size) + max_nl_size = cursor; + } + + if (max_nl_size == 0) { /* any nonlinears locked or reserved */ + ret = SWAP_FAIL; + goto out; + } + + /* + * We don't try to search for this page in the nonlinear vmas, + * and page_referenced wouldn't have found it anyway. Instead + * just walk the nonlinear vmas trying to age and unmap some. + * The mapcount of the page we came in with is irrelevant, + * but even so use it as a guide to how hard we should try? 
+ */ + mapcount = page_mapcount(page); + if (!mapcount) + goto out; + cond_resched_lock(&mapping->i_mmap_lock); + + max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; + if (max_nl_cursor == 0) + max_nl_cursor = CLUSTER_SIZE; + + do { + list_for_each_entry(vma, &mapping->i_mmap_nonlinear, + shared.vm_set.list) { + if ((vma->vm_flags & VM_LOCKED) && !migration) + continue; + cursor = (unsigned long) vma->vm_private_data; + while ( cursor < max_nl_cursor && + cursor < vma->vm_end - vma->vm_start) { + try_to_unmap_cluster(cursor, &mapcount, vma); + cursor += CLUSTER_SIZE; + vma->vm_private_data = (void *) cursor; + if ((int)mapcount <= 0) + goto out; + } + vma->vm_private_data = (void *) max_nl_cursor; + } + cond_resched_lock(&mapping->i_mmap_lock); + max_nl_cursor += CLUSTER_SIZE; + } while (max_nl_cursor <= max_nl_size); + + /* + * Don't loop forever (perhaps all the remaining pages are + * in locked vmas). Reset cursor on all unreserved nonlinear + * vmas, now forgetting on which ones it had fallen behind. + */ + list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) + vma->vm_private_data = NULL; +out: + spin_unlock(&mapping->i_mmap_lock); + return ret; } -/* - * pte_chain_alloc(): allocate a pte_chain structure for use by page_add_rmap(). +/** + * try_to_unmap - try to remove all page table mappings to a page + * @page: the page to get unmapped * - * The caller of page_add_rmap() must perform the allocation because - * page_add_rmap() is invariably called under spinlock. Often, page_add_rmap() - * will not actually use the pte_chain, because there is space available in one - * of the existing pte_chains which are attached to the page. So the case of - * allocating and then freeing a single pte_chain is specially optimised here, - * with a one-deep per-cpu cache. + * Tries to remove all the page table entries which are mapping this + * page, used in the pageout path. Caller must hold the page lock. + * Return values are: + * + * SWAP_SUCCESS - we succeeded in removing all mappings + * SWAP_AGAIN - we missed a mapping, try again later + * SWAP_FAIL - the page is unswappable */ -struct pte_chain *pte_chain_alloc(int gfp_flags) +int try_to_unmap(struct page *page, int migration) { - struct pte_chain *ret; - struct pte_chain **pte_chainp; - - might_sleep_if(gfp_flags & __GFP_WAIT); - - pte_chainp = &get_cpu_var(local_pte_chain); - if (*pte_chainp) { - ret = *pte_chainp; - *pte_chainp = NULL; - put_cpu_var(local_pte_chain); - } else { - put_cpu_var(local_pte_chain); - ret = kmem_cache_alloc(pte_chain_cache, gfp_flags); - } + int ret; + + BUG_ON(!PageLocked(page)); + + if (PageAnon(page)) + ret = try_to_unmap_anon(page, migration); + else + ret = try_to_unmap_file(page, migration); + + if (!page_mapped(page)) + ret = SWAP_SUCCESS; return ret; } -void __init pte_chain_init(void) -{ - pte_chain_cache = kmem_cache_create( "pte_chain", - sizeof(struct pte_chain), - sizeof(struct pte_chain), - 0, - pte_chain_ctor, - NULL); - - if (!pte_chain_cache) - panic("failed to create pte_chain cache!\n"); -}