diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4f251775e..5f7cf2a4c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -14,6 +14,8 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+#include <linux/migrate.h>
 
 #include <asm/pgtable.h>
@@ -22,10 +24,11 @@
  * vmscan's shrink_list, to make sync_page look nicer, and to allow
  * future use of radix_tree tags in the swap cache.
  */
-static struct address_space_operations swap_aops = {
+static const struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
 	.sync_page	= block_sync_page,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.migratepage	= migrate_page,
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
@@ -35,12 +38,11 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= RW_LOCK_UNLOCKED,
+	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
 };
-EXPORT_SYMBOL(swapper_space);
 
 #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 
@@ -67,8 +69,8 @@ void show_swap_cache_info(void)
  * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page,
-		swp_entry_t entry, int gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
+			       gfp_t gfp_mask)
 {
 	int error;
 
@@ -83,9 +85,9 @@ static int __add_to_swap_cache(struct page *page,
 			page_cache_get(page);
 			SetPageLocked(page);
 			SetPageSwapCache(page);
-			page->private = entry.val;
+			set_page_private(page, entry.val);
 			total_swapcache_pages++;
-			pagecache_acct(1);
+			__inc_zone_page_state(page, NR_FILE_PAGES);
 		}
 		write_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
@@ -124,12 +126,13 @@ void __delete_from_swap_cache(struct page *page)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!PageSwapCache(page));
 	BUG_ON(PageWriteback(page));
+	BUG_ON(PagePrivate(page));
 
-	radix_tree_delete(&swapper_space.page_tree, page->private);
-	page->private = 0;
+	radix_tree_delete(&swapper_space.page_tree, page_private(page));
+	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
 
@@ -140,13 +143,12 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock.
  */
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, gfp_t gfp_mask)
 {
 	swp_entry_t entry;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();
@@ -165,7 +167,7 @@ int add_to_swap(struct page * page)
 		 * Add it to the swap cache and mark it dirty
 		 */
 		err = __add_to_swap_cache(page, entry,
-				GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
+				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
 		switch (err) {
 		case 0:				/* Success */
@@ -196,12 +198,7 @@ void delete_from_swap_cache(struct page *page)
 {
 	swp_entry_t entry;
 
-	BUG_ON(!PageSwapCache(page));
-	BUG_ON(!PageLocked(page));
-	BUG_ON(PageWriteback(page));
-	BUG_ON(PagePrivate(page));
-
-	entry.val = page->private;
+	entry.val = page_private(page);
 
 	write_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
@@ -263,8 +260,7 @@ static inline void free_swap_cache(struct page *page)
 
 /*
  * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * this page if it is the last user of the page.
  */
 void free_page_and_swap_cache(struct page *page)
 {
@@ -278,12 +274,11 @@
  */
 void free_pages_and_swap_cache(struct page **pages, int nr)
 {
-	int chunk = 16;
 	struct page **pagep = pages;
 
 	lru_add_drain();
 	while (nr) {
-		int todo = min(chunk, nr);
+		int todo = min(nr, PAGEVEC_SIZE);
 		int i;
 
 		for (i = 0; i < todo; i++)
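
The common thread in the hunks above is the switch from raw page->private accesses to the page_private()/set_page_private() accessors. The sketch below is a minimal userspace illustration of that pattern, not kernel code: struct page and swp_entry_t here are simplified stand-ins for the kernel structures, and the two macros mirror the definitions that include/linux/mm.h carried in this era.

#include <stdio.h>

/* Simplified stand-ins for the kernel types touched by this diff. */
typedef struct {
	unsigned long val;
} swp_entry_t;

struct page {
	unsigned long private;	/* holds the swap entry while in the swap cache */
};

/*
 * Accessors in the style of include/linux/mm.h (circa 2.6.20).
 * Funneling every ->private access through one point is what lets a
 * tree-wide conversion like the one above be audited mechanically.
 */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

int main(void)
{
	struct page page = { .private = 0 };
	swp_entry_t entry = { .val = 0x2a };

	/* what __add_to_swap_cache now does instead of page->private = entry.val */
	set_page_private(&page, entry.val);

	/* what __delete_from_swap_cache now does: read the entry, then clear it */
	swp_entry_t stored = { .val = page_private(&page) };
	set_page_private(&page, 0);

	printf("stored swap entry: %#lx\n", stored.val);
	return 0;
}

Upstream, these accessors were introduced so that ->private could later be overloaded (for example, unioned with the split page-table lock) without touching every caller, which is why the conversion shows up wholesale in files like this one.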