diff --git a/mm/swap_state.c b/mm/swap_state.c
index b6232384d..6e81f2d52 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -6,35 +6,41 @@
  *
  * Rewritten to use page cache, (C) 1998 Stephen Tweedie
  */
-
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
 
 #include <asm/pgtable.h>
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_list. Only those fields initialized below are used.
+ * vmscan's shrink_list, to make sync_page look nicer, and to allow
+ * future use of radix_tree tags in the swap cache.
  */
 static struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
+	.sync_page	= block_sync_page,
+	.set_page_dirty	= __set_page_dirty_nobuffers,
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
-	.state		= 0,	/* uncongested */
+	.memory_backed	= 1,	/* Does not contribute to dirty memory */
+	.unplug_io_fn	= swap_unplug_io_fn,
 };
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
 	.tree_lock	= SPIN_LOCK_UNLOCKED,
-	.nrpages	= 0,	/* total_swapcache_pages */
 	.a_ops		= &swap_aops,
+	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
 };
+EXPORT_SYMBOL(swapper_space);
 
 #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 
@@ -68,19 +74,18 @@ static int __add_to_swap_cache(struct page *page,
 	BUG_ON(PagePrivate(page));
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
-		page_cache_get(page);
-		spin_lock(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
 		if (!error) {
+			page_cache_get(page);
 			SetPageLocked(page);
 			SetPageSwapCache(page);
 			page->private = entry.val;
 			total_swapcache_pages++;
 			pagecache_acct(1);
-		} else
-			page_cache_release(page);
-		spin_unlock(&swapper_space.tree_lock);
+		}
+		spin_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
 	}
 	return error;
@@ -166,7 +171,7 @@ int add_to_swap(struct page * page)
 	/*
 	 * Add it to the swap cache and mark it dirty
 	 */
-	err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
+	err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
 
 	if (pf_flags & PF_MEMALLOC)
 		current->flags |= PF_MEMALLOC;
@@ -207,9 +212,9 @@ void delete_from_swap_cache(struct page *page)
 
 	entry.val = page->private;
 
-	spin_lock(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
-	spin_unlock(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
@@ -308,13 +313,13 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 {
 	struct page *page;
 
-	spin_lock(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
 	if (page) {
 		page_cache_get(page);
 		INC_CACHE_INFO(find_success);
 	}
-	spin_unlock(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 	INC_CACHE_INFO(find_total);
 	return page;
 }
@@ -325,7 +330,8 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
-struct page * read_swap_cache_async(swp_entry_t entry)
+struct page *read_swap_cache_async(swp_entry_t entry,
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *found_page, *new_page = NULL;
 	int err;
@@ -336,12 +342,12 @@ struct page * read_swap_cache_async(swp_entry_t entry)
 		 * called after lookup_swap_cache() failed, re-calling
 		 * that would confuse statistics.
 		 */
-		spin_lock(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		found_page = radix_tree_lookup(&swapper_space.page_tree,
 						entry.val);
 		if (found_page)
 			page_cache_get(found_page);
-		spin_unlock(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 		if (found_page)
 			break;
 
@@ -349,7 +355,7 @@
 		/*
 		 * Get a new page to read into from swap.
 		 */
 		if (!new_page) {
-			new_page = alloc_page(GFP_HIGHUSER);
+			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 			if (!new_page)
 				break;		/* Out of memory */
 		}
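
The read_swap_cache_async() change above threads the faulting vma and
address down to alloc_page_vma(), so the page read in from swap is
allocated according to that vma's NUMA mempolicy instead of by a bare
alloc_page(). As a rough illustration, a 2.6-era caller on the fault path
could look like the sketch below; swapin_page() is a hypothetical helper
invented for this note, not part of the patch:

	/*
	 * Hypothetical helper (illustration only): check the swap cache,
	 * and on a miss read the page in, letting alloc_page_vma() place
	 * it according to the vma's NUMA mempolicy.
	 */
	static struct page *swapin_page(swp_entry_t entry,
					struct vm_area_struct *vma,
					unsigned long address)
	{
		struct page *page = lookup_swap_cache(entry);

		if (!page)
			page = read_swap_cache_async(entry, vma, address);
		return page;	/* NULL: allocation failed or entry no longer in use */
	}

The spin_lock() to spin_lock_irq() conversions on swapper_space.tree_lock
match the new header comment: once radix_tree tags are used in the swap
cache, the tree can be touched from interrupt context (for example at I/O
completion), so every taker of tree_lock must disable interrupts.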