};
static struct backing_dev_info swap_backing_dev_info = {
- .memory_backed = 1, /* Does not contribute to dirty memory */
+ .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
.unplug_io_fn = swap_unplug_io_fn,
};
struct address_space swapper_space = {
.page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
- .tree_lock = SPIN_LOCK_UNLOCKED,
+ .tree_lock = RW_LOCK_UNLOCKED,
.a_ops = &swap_aops,
.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
.backing_dev_info = &swap_backing_dev_info,
};
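The hunks below only make sense given this conversion: once tree_lock is an rwlock, lookups can take the lock shared while inserts and deletes take it exclusive. Here is a minimal, runnable userspace analogue of that split, using a pthreads rwlock in place of the kernel lock; the names are illustrative and not part of the patch:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static int swapcache_pages;	/* stand-in for the radix tree */

/* Reader side: analogous to a swap-cache lookup. Any number of
 * readers may hold the lock concurrently. */
static void lookup(void)
{
	pthread_rwlock_rdlock(&tree_lock);
	printf("pages in cache: %d\n", swapcache_pages);
	pthread_rwlock_unlock(&tree_lock);
}

/* Writer side: analogous to __add_to_swap_cache(). A writer is
 * exclusive against both readers and other writers. */
static void insert(void)
{
	pthread_rwlock_wrlock(&tree_lock);
	swapcache_pages++;
	pthread_rwlock_unlock(&tree_lock);
}

int main(void)
{
	insert();
	lookup();
	return 0;
}

Build with something like cc demo.c -lpthread. The payoff in the patch is the same as in the toy: read-mostly paths (lookups) stop serializing against each other.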
BUG_ON(PagePrivate(page));
error = radix_tree_preload(gfp_mask);
if (!error) {
- spin_lock_irq(&swapper_space.tree_lock);
+ write_lock_irq(&swapper_space.tree_lock);
error = radix_tree_insert(&swapper_space.page_tree,
entry.val, page);
if (!error) {
total_swapcache_pages++;
pagecache_acct(1);
}
- spin_unlock_irq(&swapper_space.tree_lock);
+ write_unlock_irq(&swapper_space.tree_lock);
radix_tree_preload_end();
}
return error;
}
int add_to_swap(struct page * page)
{
swp_entry_t entry;
- int pf_flags;
int err;
if (!PageLocked(page))
BUG();

entry = get_swap_page();
if (!entry.val)
return 0;
- /* Radix-tree node allocations are performing
- * GFP_ATOMIC allocations under PF_MEMALLOC.
- * They can completely exhaust the page allocator.
- *
- * So PF_MEMALLOC is dropped here. This causes the slab
- * allocations to fail earlier, so radix-tree nodes will
- * then be allocated from the mempool reserves.
+ /*
+ * Radix-tree node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
*
- * We're still using __GFP_HIGH for radix-tree node
- * allocations, so some of the emergency pools are available,
- * just not all of them.
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
*/
-
- pf_flags = current->flags;
- current->flags &= ~PF_MEMALLOC;
-
/*
* Add it to the swap cache and mark it dirty
*/
- err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
-
- if (pf_flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
+ err = __add_to_swap_cache(page, entry,
+ GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
switch (err) {
case 0: /* Success */
void delete_from_swap_cache(struct page *page)
{
swp_entry_t entry;

entry.val = page->private;
- spin_lock_irq(&swapper_space.tree_lock);
+ write_lock_irq(&swapper_space.tree_lock);
__delete_from_swap_cache(page);
- spin_unlock_irq(&swapper_space.tree_lock);
+ write_unlock_irq(&swapper_space.tree_lock);
swap_free(entry);
page_cache_release(page);
}
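The PF_MEMALLOC hunks above deserve a gloss: the old code saved current->flags, cleared PF_MEMALLOC around the radix-tree insertion, and restored it afterwards; the new code expresses the same intent per allocation with __GFP_NOMEMALLOC. A toy model of the two approaches follows; the flag values and the may_use_reserves() helper are made up for illustration, only the logic mirrors the patch:

#include <stdio.h>

#define PF_MEMALLOC	0x1	/* task flag: may dip into reserves */
#define GFP_NOMEMALLOC	0x2	/* allocation flag: never use reserves */

struct task { unsigned flags; };

/* Reserves are available only when the caller runs with PF_MEMALLOC
 * and did not pass GFP_NOMEMALLOC -- which is why the per-call flag
 * can replace the save/clear/restore of current->flags. */
static int may_use_reserves(const struct task *t, unsigned gfp)
{
	return (t->flags & PF_MEMALLOC) && !(gfp & GFP_NOMEMALLOC);
}

int main(void)
{
	struct task reclaim = { .flags = PF_MEMALLOC };

	/* Old approach: save, clear and restore the task-wide flag. */
	unsigned saved = reclaim.flags;
	reclaim.flags &= ~PF_MEMALLOC;
	printf("old: reserves allowed = %d\n", may_use_reserves(&reclaim, 0));
	reclaim.flags = saved;

	/* New approach: state the intent on the allocation itself. */
	printf("new: reserves allowed = %d\n",
	       may_use_reserves(&reclaim, GFP_NOMEMALLOC));
	return 0;
}

Both print 0: the end state is the same, but the new form cannot leak a cleared PF_MEMALLOC past an early return, and it documents the policy at the call site.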
struct page * lookup_swap_cache(swp_entry_t entry)
{
struct page *page;
- spin_lock_irq(&swapper_space.tree_lock);
- page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
- if (page) {
- page_cache_get(page);
+ page = find_get_page(&swapper_space, entry.val);
+
+ if (page)
INC_CACHE_INFO(find_success);
- }
- spin_unlock_irq(&swapper_space.tree_lock);
+
INC_CACHE_INFO(find_total);
return page;
}
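This conversion is behavior-preserving because find_get_page() bundles exactly the sequence being deleted: a radix-tree lookup plus a reference grab, under what is now the read side of tree_lock. Roughly, the implementation of that era reduces to the following sketch (simplified from mm/filemap.c of the same period; it is context, not part of this patch):

struct page *find_get_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);	/* shared: lookups may run in parallel */
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);		/* pin the page before dropping the lock */
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

Since swapper_space is an ordinary struct address_space, the swap cache inherits this shared-lock fast path for free, which is what lets the open-coded lookups here and in the next hunk be deleted.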
/*
 * First check the swap cache. Since this is normally
 * called after lookup_swap_cache() failed, re-calling
* that would confuse statistics.
*/
- spin_lock_irq(&swapper_space.tree_lock);
- found_page = radix_tree_lookup(&swapper_space.page_tree,
- entry.val);
- if (found_page)
- page_cache_get(found_page);
- spin_unlock_irq(&swapper_space.tree_lock);
+ found_page = find_get_page(&swapper_space, entry.val);
if (found_page)
break;