*
* Rewritten to use page cache, (C) 1998 Stephen Tweedie
*/
-
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <asm/pgtable.h>
/*
* swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_list. Only those fields initialized below are used.
+ * vmscan's shrink_list, to make sync_page look nicer, and to allow
+ * future use of radix_tree tags in the swap cache.
*/
static struct address_space_operations swap_aops = {
.writepage = swap_writepage,
+ .sync_page = block_sync_page,
+ .set_page_dirty = __set_page_dirty_nobuffers,
};
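/*
 * Both new entries matter because swap cache pages never carry
 * buffer_heads: block_sync_page() just unplugs the block device
 * queue when a waiter needs the I/O issued, and dirtying must go
 * through __set_page_dirty_nobuffers() rather than the buffer-based
 * default path.
 */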
static struct backing_dev_info swap_backing_dev_info = {
- .state = 0, /* uncongested */
+ .memory_backed = 1, /* Does not contribute to dirty memory */
+ .unplug_io_fn = swap_unplug_io_fn,
};
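/*
 * memory_backed tells the VM these pages are not file-backed, so they
 * are never accounted as dirty memory for writeback throttling;
 * swap_unplug_io_fn() unplugs the queue of the swap device backing a
 * given page.
 */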
struct address_space swapper_space = {
.page_tree = RADIX_TREE_INIT(GFP_ATOMIC),
.tree_lock = SPIN_LOCK_UNLOCKED,
- .nrpages = 0, /* total_swapcache_pages */
.a_ops = &swap_aops,
+ .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
.backing_dev_info = &swap_backing_dev_info,
};
+EXPORT_SYMBOL(swapper_space);
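/*
 * Note that swapper_space.tree_lock is taken with spin_lock_irq()
 * everywhere below: with radix_tree tags in the swap cache (see the
 * comment above), the tree can be touched from interrupt context at
 * I/O completion, so the lock must be interrupt-safe.
 */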
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
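/* Plain, unlocked increments: swap_cache_info is best-effort statistics. */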
BUG_ON(PagePrivate(page));
error = radix_tree_preload(gfp_mask);
if (!error) {
- page_cache_get(page);
- spin_lock(&swapper_space.tree_lock);
+ spin_lock_irq(&swapper_space.tree_lock);
error = radix_tree_insert(&swapper_space.page_tree,
entry.val, page);
if (!error) {
+ page_cache_get(page); /* the cache's reference, taken only on success */
SetPageLocked(page);
SetPageSwapCache(page);
page->private = entry.val;
total_swapcache_pages++;
pagecache_acct(1);
- } else
- page_cache_release(page);
- spin_unlock(&swapper_space.tree_lock);
+ }
+ spin_unlock_irq(&swapper_space.tree_lock);
radix_tree_preload_end();
}
return error;
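/*
 * Illustrative sketch, not part of the patch: the preload pattern
 * __add_to_swap_cache() relies on above, shown against a hypothetical
 * tree/lock pair.  radix_tree_preload() charges a per-cpu node reserve
 * (and disables preemption), so the later radix_tree_insert() cannot
 * fail with -ENOMEM while the irq-disabling lock is held.
 */
static int example_tree_insert(struct radix_tree_root *tree,
			       spinlock_t *lock, unsigned long index,
			       void *item, int gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		spin_lock_irq(lock);
		error = radix_tree_insert(tree, index, item);
		spin_unlock_irq(lock);
		radix_tree_preload_end();	/* re-enables preemption */
	}
	return error;
}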
/*
* Add it to the swap cache and mark it dirty
*/
- err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
+ /* __GFP_NOWARN: the GFP_ATOMIC node allocation may fail; don't warn */
+ err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
if (pf_flags & PF_MEMALLOC)
current->flags |= PF_MEMALLOC;
entry.val = page->private;
- spin_lock(&swapper_space.tree_lock);
+ spin_lock_irq(&swapper_space.tree_lock);
__delete_from_swap_cache(page);
- spin_unlock(&swapper_space.tree_lock);
+ spin_unlock_irq(&swapper_space.tree_lock);
swap_free(entry);
page_cache_release(page);
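/*
 * Note the ordering above: the swap entry is sampled from
 * page->private before __delete_from_swap_cache() clears it;
 * swap_free() then releases the swap-map reference the cache held,
 * and page_cache_release() drops the cache's reference to the page.
 */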
{
struct page *page;
- spin_lock(&swapper_space.tree_lock);
+ spin_lock_irq(&swapper_space.tree_lock);
page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
if (page) {
page_cache_get(page);
INC_CACHE_INFO(find_success);
}
- spin_unlock(&swapper_space.tree_lock);
+ spin_unlock_irq(&swapper_space.tree_lock);
INC_CACHE_INFO(find_total);
return page;
}
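/*
 * Illustrative sketch, not part of the patch: a typical caller of
 * lookup_swap_cache().  A successful lookup returns the page with its
 * reference count raised, so the caller must balance it with
 * page_cache_release().  The wrapper function itself is hypothetical.
 */
static void example_use(swp_entry_t entry)
{
	struct page *page;

	page = lookup_swap_cache(entry);
	if (page) {
		/* ... inspect or map the swap cache page ... */
		page_cache_release(page);
	}
}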
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
*/
-struct page * read_swap_cache_async(swp_entry_t entry)
+struct page *read_swap_cache_async(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
{
struct page *found_page, *new_page = NULL;
int err;
* called after lookup_swap_cache() failed, re-calling
* that would confuse statistics.
*/
- spin_lock(&swapper_space.tree_lock);
+ spin_lock_irq(&swapper_space.tree_lock);
found_page = radix_tree_lookup(&swapper_space.page_tree,
entry.val);
if (found_page)
page_cache_get(found_page);
- spin_unlock(&swapper_space.tree_lock);
+ spin_unlock_irq(&swapper_space.tree_lock);
if (found_page)
break;
* Get a new page to read into from swap.
*/
if (!new_page) {
- new_page = alloc_page(GFP_HIGHUSER);
+ /* allocate under the NUMA mempolicy of @vma at @addr */
+ new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
if (!new_page)
break; /* Out of memory */
}
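/*
 * Illustrative sketch, not part of the patch: a minimal swap-in helper
 * in the style of do_swap_page(), showing how the new vma/addr
 * arguments flow into read_swap_cache_async() so the page is allocated
 * under the right NUMA mempolicy.  The wrapper itself is hypothetical.
 */
static struct page *example_swapin(swp_entry_t entry,
				   struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct page *page;

	page = lookup_swap_cache(entry);
	if (!page)
		page = read_swap_cache_async(entry, vma, addr);
	return page;	/* referenced page, or NULL on failure */
}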