#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
-#include <linux/kallsyms.h>
#include <linux/vs_memory.h>
#include <asm/tlbflush.h>
-struct kmem_cache *anon_vma_cachep;
+//#define RMAP_DEBUG /* enable only for debugging */
+
+kmem_cache_t *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
-#ifdef CONFIG_DEBUG_VM
+#ifdef RMAP_DEBUG
struct anon_vma *anon_vma = find_vma->anon_vma;
struct vm_area_struct *vma;
unsigned int mapcount = 0;
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
- list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+ list_add(&vma->anon_vma_node, &anon_vma->head);
allocated = NULL;
}
spin_unlock(&mm->page_table_lock);
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma) {
- list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+ list_add(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
}
}
if (anon_vma) {
spin_lock(&anon_vma->lock);
- list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+ list_add(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
spin_unlock(&anon_vma->lock);
}
anon_vma_free(anon_vma);
}
-static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
- unsigned long flags)
+static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
return anon_vma;
}
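
/*
 * Not part of this patch: for context, a sketch of how anon_vma_cachep is
 * typically created so that anon_vma_ctor() above runs for each object,
 * assuming the 2.6.16-era kmem_cache_create() signature.
 */
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}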
+#ifdef CONFIG_MIGRATION
+/*
+ * Remove an anonymous page from swap, replacing its swap ptes with real
+ * ptes that point to valid pages, and then releasing the page from the
+ * swap cache.
+ *
+ * The caller must hold the page lock and the mmap_sem of one vma that
+ * contains the page (a usage sketch follows the #endif below).
+ */
+void remove_from_swap(struct page *page)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+ unsigned long mapping;
+
+ if (!PageSwapCache(page))
+ return;
+
+ mapping = (unsigned long)page->mapping;
+
+ if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
+ return;
+
+	/*
+	 * We already hold mmap_sem, so there is no need to go through
+	 * page_lock_anon_vma() here.
+	 */
+ anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
+ spin_lock(&anon_vma->lock);
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+ remove_vma_swap(vma, page);
+
+ spin_unlock(&anon_vma->lock);
+ delete_from_swap_cache(page);
+}
+EXPORT_SYMBOL(remove_from_swap);
+#endif
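
/*
 * Not part of this patch: a hypothetical caller illustrating the locking
 * contract documented above (example_unswap and example_mm are made-up
 * names for illustration only).
 */
static void example_unswap(struct mm_struct *example_mm, struct page *page)
{
	down_read(&example_mm->mmap_sem);	/* mmap_sem of an mm whose vma maps the page */
	lock_page(page);			/* page lock required by remove_from_swap() */
	remove_from_swap(page);			/* rewrite swap ptes, drop the swap cache entry */
	unlock_page(page);
	up_read(&example_mm->mmap_sem);
}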
+
/*
* At what user virtual address is page expected in vma?
*/
return referenced;
}
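
/*
 * Not part of this patch: a sketch answering the vma_address() question
 * above, assuming the usual linear offset arithmetic of this era (the real
 * helper is elided from this hunk; example_vma_address is a made-up name).
 */
static inline unsigned long example_vma_address(struct page *page,
						struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;		/* page is not mapped within this vma */
	return address;
}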
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
- pte_t *pte, entry;
- spinlock_t *ptl;
- int ret = 0;
-
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
- pte = page_check_address(page, mm, address, &ptl);
- if (!pte)
- goto out;
-
- if (!pte_dirty(*pte) && !pte_write(*pte))
- goto unlock;
-
- entry = ptep_get_and_clear(mm, address, pte);
- entry = pte_mkclean(entry);
- entry = pte_wrprotect(entry);
- ptep_establish(vma, address, pte, entry);
- lazy_mmu_prot_update(entry);
- ret = 1;
-
-unlock:
- pte_unmap_unlock(pte, ptl);
-out:
- return ret;
-}
-
-static int page_mkclean_file(struct address_space *mapping, struct page *page)
-{
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct vm_area_struct *vma;
- struct prio_tree_iter iter;
- int ret = 0;
-
- BUG_ON(PageAnon(page));
-
- spin_lock(&mapping->i_mmap_lock);
- vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (vma->vm_flags & VM_SHARED)
- ret += page_mkclean_one(page, vma);
- }
- spin_unlock(&mapping->i_mmap_lock);
- return ret;
-}
-
-int page_mkclean(struct page *page)
-{
- int ret = 0;
-
- WARN_ON_ONCE(!PageLocked(page));
-
- if (page_mapped(page)) {
- struct address_space *mapping = page_mapping(page);
- if (mapping)
- ret = page_mkclean_file(mapping, page);
- }
-
- return ret;
-}
-
/**
* page_set_anon_rmap - setup new anonymous rmap
* @page: the page to add the mapping to
 * nr_mapped state can be updated without turning off
 * interrupts because it is never modified from interrupt context.
*/
- __inc_zone_page_state(page, NR_ANON_PAGES);
+ __inc_page_state(nr_mapped);
}
/**
void page_add_file_rmap(struct page *page)
{
if (atomic_inc_and_test(&page->_mapcount))
- __inc_zone_page_state(page, NR_FILE_MAPPED);
+ __inc_page_state(nr_mapped);
}
/**
*
* The caller needs to hold the pte lock.
*/
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
+void page_remove_rmap(struct page *page)
{
if (atomic_add_negative(-1, &page->_mapcount)) {
- if (unlikely(page_mapcount(page) < 0)) {
+ if (page_mapcount(page) < 0) {
printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
printk (KERN_EMERG " page->flags = %lx\n", page->flags);
printk (KERN_EMERG " page->count = %x\n", page_count(page));
printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
- if (vma->vm_ops)
- print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
- BUG();
}
+
+ BUG_ON(page_mapcount(page) < 0);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
*/
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- __dec_zone_page_state(page,
- PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
+ __dec_page_state(nr_mapped);
}
}
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- int migration)
+ int ignore_refs)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
*/
- if (!migration && ((vma->vm_flags & VM_LOCKED) ||
- (ptep_clear_flush_young(vma, address, pte)))) {
+ if ((vma->vm_flags & VM_LOCKED) ||
+ (ptep_clear_flush_young(vma, address, pte)
+ && !ignore_refs)) {
ret = SWAP_FAIL;
goto out_unmap;
}
if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
-
- if (PageSwapCache(page)) {
- /*
- * Store the swap location in the pte.
- * See handle_pte_fault() ...
- */
- swap_duplicate(entry);
- if (list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- if (list_empty(&mm->mmlist))
- list_add(&mm->mmlist, &init_mm.mmlist);
- spin_unlock(&mmlist_lock);
- }
- dec_mm_counter(mm, anon_rss);
-#ifdef CONFIG_MIGRATION
- } else {
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- BUG_ON(!migration);
- entry = make_migration_entry(page, pte_write(pteval));
-#endif
+ /*
+ * Store the swap location in the pte.
+ * See handle_pte_fault() ...
+ */
+ BUG_ON(!PageSwapCache(page));
+ swap_duplicate(entry);
+ if (list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&mm->mmlist))
+ list_add(&mm->mmlist, &init_mm.mmlist);
+ spin_unlock(&mmlist_lock);
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
+ dec_mm_counter(mm, anon_rss);
} else
-#ifdef CONFIG_MIGRATION
- if (migration) {
- /* Establish migration entry for a file page */
- swp_entry_t entry;
- entry = make_migration_entry(page, pte_write(pteval));
- set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
- } else
-#endif
dec_mm_counter(mm, file_rss);
-
- page_remove_rmap(page, vma);
+ page_remove_rmap(page);
page_cache_release(page);
out_unmap:
if (pte_dirty(pteval))
set_page_dirty(page);
- page_remove_rmap(page, vma);
+ page_remove_rmap(page);
page_cache_release(page);
dec_mm_counter(mm, file_rss);
(*mapcount)--;
pte_unmap_unlock(pte - 1, ptl);
}
-static int try_to_unmap_anon(struct page *page, int migration)
+static int try_to_unmap_anon(struct page *page, int ignore_refs)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- ret = try_to_unmap_one(page, vma, migration);
+ ret = try_to_unmap_one(page, vma, ignore_refs);
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
*
* This function is only called from try_to_unmap for object-based pages.
*/
-static int try_to_unmap_file(struct page *page, int migration)
+static int try_to_unmap_file(struct page *page, int ignore_refs)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- ret = try_to_unmap_one(page, vma, migration);
+ ret = try_to_unmap_one(page, vma, ignore_refs);
if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if ((vma->vm_flags & VM_LOCKED) && !migration)
+ if (vma->vm_flags & VM_LOCKED)
continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if ((vma->vm_flags & VM_LOCKED) && !migration)
+ if (vma->vm_flags & VM_LOCKED)
continue;
cursor = (unsigned long) vma->vm_private_data;
while ( cursor < max_nl_cursor &&
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page *page, int migration)
+int try_to_unmap(struct page *page, int ignore_refs)
{
int ret;
BUG_ON(!PageLocked(page));
if (PageAnon(page))
- ret = try_to_unmap_anon(page, migration);
+ ret = try_to_unmap_anon(page, ignore_refs);
else
- ret = try_to_unmap_file(page, migration);
+ ret = try_to_unmap_file(page, ignore_refs);
if (!page_mapped(page))
ret = SWAP_SUCCESS;