diff --git a/mm/swap_state.c b/mm/swap_state.c
index b6232384d..5f7cf2a4c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -6,33 +6,41 @@
  *
  *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  */
-
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
+#include <linux/pagevec.h>
+#include <linux/migrate.h>
 
 #include <asm/pgtable.h>
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_list.  Only those fields initialized below are used.
+ * vmscan's shrink_list, to make sync_page look nicer, and to allow
+ * future use of radix_tree tags in the swap cache.
  */
-static struct address_space_operations swap_aops = {
+static const struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
+	.sync_page	= block_sync_page,
+	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.migratepage	= migrate_page,
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
-	.state		= 0,	/* uncongested */
+	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+	.unplug_io_fn	= swap_unplug_io_fn,
 };
 
 struct address_space swapper_space = {
-	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
-	.tree_lock	= SPIN_LOCK_UNLOCKED,
-	.nrpages	= 0,	/* total_swapcache_pages */
+	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
+	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
+	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
 };
 
@@ -53,14 +61,16 @@ void show_swap_cache_info(void)
 		swap_cache_info.add_total, swap_cache_info.del_total,
 		swap_cache_info.find_success, swap_cache_info.find_total,
 		swap_cache_info.noent_race, swap_cache_info.exist_race);
+	printk("Free swap = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
+	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
 /*
  * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page,
-		swp_entry_t entry, int gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
+			       gfp_t gfp_mask)
 {
 	int error;
 
@@ -68,19 +78,18 @@ static int __add_to_swap_cache(struct page *page,
 	BUG_ON(PagePrivate(page));
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
-		page_cache_get(page);
-		spin_lock(&swapper_space.tree_lock);
+		write_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
 		if (!error) {
+			page_cache_get(page);
 			SetPageLocked(page);
 			SetPageSwapCache(page);
-			page->private = entry.val;
+			set_page_private(page, entry.val);
 			total_swapcache_pages++;
-			pagecache_acct(1);
-		} else
-			page_cache_release(page);
-		spin_unlock(&swapper_space.tree_lock);
+			__inc_zone_page_state(page, NR_FILE_PAGES);
+		}
+		write_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
 	}
 	return error;
@@ -117,12 +126,13 @@ void __delete_from_swap_cache(struct page *page)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!PageSwapCache(page));
 	BUG_ON(PageWriteback(page));
+	BUG_ON(PagePrivate(page));
 
-	radix_tree_delete(&swapper_space.page_tree, page->private);
-	page->private = 0;
+	radix_tree_delete(&swapper_space.page_tree, page_private(page));
+	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
 
@@ -133,43 +143,31 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock.
  */
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, gfp_t gfp_mask)
 {
 	swp_entry_t entry;
-	int pf_flags;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();
 		if (!entry.val)
 			return 0;
 
-		/* Radix-tree node allocations are performing
-		 * GFP_ATOMIC allocations under PF_MEMALLOC.
-		 * They can completely exhaust the page allocator.
-		 *
-		 * So PF_MEMALLOC is dropped here.  This causes the slab
-		 * allocations to fail earlier, so radix-tree nodes will
-		 * then be allocated from the mempool reserves.
+		/*
+		 * Radix-tree node allocations from PF_MEMALLOC contexts could
+		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
+		 * stops emergency reserves from being allocated.
 		 *
-		 * We're still using __GFP_HIGH for radix-tree node
-		 * allocations, so some of the emergency pools are available,
-		 * just not all of them.
+		 * TODO: this could cause a theoretical memory reclaim
+		 * deadlock in the swap out path.
 		 */
-
-		pf_flags = current->flags;
-		current->flags &= ~PF_MEMALLOC;
-
 		/*
 		 * Add it to the swap cache and mark it dirty
 		 */
-		err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
-
-		if (pf_flags & PF_MEMALLOC)
-			current->flags |= PF_MEMALLOC;
+		err = __add_to_swap_cache(page, entry,
+				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
 		switch (err) {
 		case 0:				/* Success */
@@ -200,16 +198,11 @@ void delete_from_swap_cache(struct page *page)
 {
 	swp_entry_t entry;
 
-	BUG_ON(!PageSwapCache(page));
-	BUG_ON(!PageLocked(page));
-	BUG_ON(PageWriteback(page));
-	BUG_ON(PagePrivate(page));
-
-	entry.val = page->private;
+	entry.val = page_private(page);
 
-	spin_lock(&swapper_space.tree_lock);
+	write_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
-	spin_unlock(&swapper_space.tree_lock);
+	write_unlock_irq(&swapper_space.tree_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
@@ -267,8 +260,7 @@ static inline void free_swap_cache(struct page *page)
 
 /*
  * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * this page if it is the last user of the page.
  */
 void free_page_and_swap_cache(struct page *page)
 {
@@ -282,12 +274,11 @@ void free_page_and_swap_cache(struct page *page)
  */
 void free_pages_and_swap_cache(struct page **pages, int nr)
 {
-	int chunk = 16;
 	struct page **pagep = pages;
 
 	lru_add_drain();
 	while (nr) {
-		int todo = min(chunk, nr);
+		int todo = min(nr, PAGEVEC_SIZE);
 		int i;
 
 		for (i = 0; i < todo; i++)
@@ -308,13 +299,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 {
 	struct page *page;
 
-	spin_lock(&swapper_space.tree_lock);
-	page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
-	if (page) {
-		page_cache_get(page);
+	page = find_get_page(&swapper_space, entry.val);
+
+	if (page)
 		INC_CACHE_INFO(find_success);
-	}
-	spin_unlock(&swapper_space.tree_lock);
+
 	INC_CACHE_INFO(find_total);
 	return page;
 }
@@ -325,7 +314,8 @@ struct page * lookup_swap_cache(swp_entry_t entry)
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
  */
-struct page * read_swap_cache_async(swp_entry_t entry)
+struct page *read_swap_cache_async(swp_entry_t entry,
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *found_page, *new_page = NULL;
 	int err;
@@ -336,12 +326,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
 		 * called after lookup_swap_cache() failed, re-calling
 		 * that would confuse statistics.
 		 */
-		spin_lock(&swapper_space.tree_lock);
-		found_page = radix_tree_lookup(&swapper_space.page_tree,
-						entry.val);
-		if (found_page)
-			page_cache_get(found_page);
-		spin_unlock(&swapper_space.tree_lock);
+		found_page = find_get_page(&swapper_space, entry.val);
 		if (found_page)
 			break;
 
@@ -349,7 +334,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
 		 * Get a new page to read into from swap.
 		 */
 		if (!new_page) {
-			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 			if (!new_page)
 				break;		/* Out of memory */
 		}
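
The new __add_to_swap_cache() above relies on a two-phase pattern: reserve the
radix-tree node with radix_tree_preload() before taking tree_lock, then do the
insert under write_lock_irq(), where allocation can no longer fail. Below is a
minimal userspace sketch of that idea, assuming a toy list-based "tree";
preload_node() and locked_insert() are hypothetical stand-ins for
radix_tree_preload() and radix_tree_insert(), not kernel APIs.

/*
 * Userspace sketch (illustrative names, not kernel APIs) of the
 * preload-then-insert pattern: the node is allocated outside the lock,
 * where failure is harmless, so the insert under the lock cannot fail
 * on allocation.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long key;
	const char *val;
	struct node *next;
};

static struct node *head;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread struct node *preloaded;	/* per-thread reserve */

/* analogue of radix_tree_preload(): may fail, runs unlocked */
static int preload_node(void)
{
	if (!preloaded)
		preloaded = malloc(sizeof(*preloaded));
	return preloaded ? 0 : -1;
}

/* analogue of radix_tree_insert(): consumes the reserve under the lock */
static void locked_insert(unsigned long key, const char *val)
{
	struct node *n = preloaded;

	preloaded = NULL;
	n->key = key;
	n->val = val;
	pthread_mutex_lock(&tree_lock);
	n->next = head;
	head = n;
	pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
	if (preload_node())
		return 1;		/* -ENOMEM analogue */
	locked_insert(42UL, "page");
	printf("inserted key %lu\n", head->key);
	return 0;
}

The split matters because the allocation, which may sleep or dip into
reserves, happens where failure can be handled gracefully; the critical
section performs no allocation at all. That is also why add_to_swap() can
simply pass gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN through to the preload
instead of toggling PF_MEMALLOC around a GFP_ATOMIC insert, as the old code
did.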
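
Several hunks above replace an open-coded lookup (spin_lock,
radix_tree_lookup, page_cache_get, spin_unlock) with find_get_page(). The
invariant both versions preserve is that the reference is taken while the
tree lock is still held, so the page cannot be freed between lookup and use.
A rough userspace analogue follows, assuming a pthread rwlock to mirror the
read-side concurrency the new rwlock tree_lock allows; lookup_get() and
struct entry are illustrative only.

/*
 * Userspace sketch (illustrative names) of the find_get_page() contract:
 * look the entry up and pin it with a reference inside the same critical
 * section, so it cannot be freed before the caller uses it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct entry {
	unsigned long key;
	atomic_int refcount;
	struct entry *next;
};

static struct entry *cache;
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

/* analogue of find_get_page(): lookup + reference in one critical section */
static struct entry *lookup_get(unsigned long key)
{
	struct entry *e;

	pthread_rwlock_rdlock(&tree_lock);	/* readers run concurrently */
	for (e = cache; e; e = e->next)
		if (e->key == key)
			break;
	if (e)
		atomic_fetch_add(&e->refcount, 1);	/* pin before unlock */
	pthread_rwlock_unlock(&tree_lock);
	return e;				/* caller drops the reference */
}

int main(void)
{
	static struct entry page = { .key = 42UL };

	cache = &page;
	if (lookup_get(42UL))
		printf("found key %lu, refcount %d\n",
		       page.key, atomic_load(&page.refcount));
	return 0;
}

With tree_lock converted to a reader/writer lock, such lookups can proceed in
parallel; only inserts and deletes need the exclusive write_lock_irq().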