/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list.  Only those fields initialized below are used.
 */
static struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
};

static struct backing_dev_info swap_backing_dev_info = {
        .memory_backed  = 1,    /* Does not contribute to dirty memory */
        .unplug_io_fn   = swap_unplug_io_fn,
};

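/*
 * Unlike a file-backed address_space, pages in the swap cache are keyed
 * in the radix tree by their swp_entry_t value (kept in page->private)
 * rather than by a file offset.
 */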
struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC),
        .tree_lock      = SPIN_LOCK_UNLOCKED,
        .a_ops          = &swap_aops,
        .backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

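/*
 * Cumulative swap cache statistics, printed by show_swap_cache_info().
 * INC_CACHE_INFO() is a plain, unlocked increment, so the counters are
 * only approximate under concurrent updates.
 */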
static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
        unsigned long noent_race;
        unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total,
                swap_cache_info.noent_race, swap_cache_info.exist_race);
}

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page,
                swp_entry_t entry, int gfp_mask)
{
        int error;

        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
                spin_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (!error) {
                        page_cache_get(page);
                        SetPageLocked(page);
                        SetPageSwapCache(page);
                        page->private = entry.val;
                        total_swapcache_pages++;
                }
                spin_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
        }
        return error;
}

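/*
 * add_to_swap_cache() wraps __add_to_swap_cache(): it first takes a
 * reference on the swap entry with swap_duplicate() and it updates the
 * swap_cache_info counters according to the outcome.
 */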
static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;

        if (!swap_duplicate(entry)) {
                INC_CACHE_INFO(noent_race);
                return -ENOENT;
        }
        error = __add_to_swap_cache(page, entry, GFP_KERNEL);
        /* Anon pages are already on the LRU, we don't run lru_cache_add here. */
        if (error) {
                swap_free(entry);
                if (error == -EEXIST)
                        INC_CACHE_INFO(exist_race);
                return error;
        }
        INC_CACHE_INFO(add_total);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
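/* The caller must also hold swapper_space.tree_lock. */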
void __delete_from_swap_cache(struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(!PageSwapCache(page));
        BUG_ON(PageWriteback(page));

        radix_tree_delete(&swapper_space.page_tree, page->private);
        page->private = 0;
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page * page)
{
        swp_entry_t entry;
        int pf_flags;
        int err;

        if (!PageLocked(page))
                BUG();

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;
                /* Radix-tree node allocations are performing
                 * GFP_ATOMIC allocations under PF_MEMALLOC.
                 * They can completely exhaust the page allocator.
                 *
                 * So PF_MEMALLOC is dropped here.  This causes the slab
                 * allocations to fail earlier, so radix-tree nodes will
                 * then be allocated from the mempool reserves.
                 *
                 * We're still using __GFP_HIGH for radix-tree node
                 * allocations, so some of the emergency pools are available,
                 * just not all of them.
                 */
                pf_flags = current->flags;
                current->flags &= ~PF_MEMALLOC;

                /* Add it to the swap cache and mark it dirty */
                err = __add_to_swap_cache(page, entry, GFP_ATOMIC);

                if (pf_flags & PF_MEMALLOC)
                        current->flags |= PF_MEMALLOC;

                switch (err) {
                case 0:                         /* Success */
                        SetPageUptodate(page);
                        SetPageDirty(page);
                        INC_CACHE_INFO(add_total);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        INC_CACHE_INFO(exist_race);
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        BUG_ON(!PageSwapCache(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(PagePrivate(page));

        entry.val = page->private;

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}

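/*
 * The two "swizzling" helpers below move a page between a file's page
 * cache (mapping + index) and the swap cache (PageSwapCache + swap entry
 * in page->private) without copying its contents; only shmem uses them.
 */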
/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
        if (!err) {
                remove_from_page_cache(page);
                page_cache_release(page);       /* pagecache ref */
                if (!swap_duplicate(entry))
                        BUG();
                SetPageDirty(page);
                INC_CACHE_INFO(add_total);
        } else if (err == -EEXIST)
                INC_CACHE_INFO(exist_race);
        return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
                struct address_space *mapping)
{
        int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
        if (!err) {
                delete_from_swap_cache(page);
                /* shift page from clean_pages to dirty_pages list */
                ClearPageDirty(page);
                set_page_dirty(page);
        }
        return err;
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 */
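/* TestSetPageLocked() is a trylock: if the page is already locked we
 * simply skip it here rather than sleep on the page lock. */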
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                remove_exclusive_swap_page(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page. Can not do a lock_page,
 * as we are holding the page_table_lock spinlock.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
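/* The work is done in small chunks so that each release_pages() call
 * only ever handles a bounded batch of pages. */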
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        int chunk = 16;
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int i;
                int todo = min(chunk, nr);

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        spin_lock_irq(&swapper_space.tree_lock);
        page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
        if (page) {
                page_cache_get(page);
                INC_CACHE_INFO(find_success);
        }
        spin_unlock_irq(&swapper_space.tree_lock);
        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
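/* Retries while add_to_swap_cache() returns -EEXIST, i.e. while we race
 * with someone else bringing the same entry into the swap cache; gives
 * up on -ENOENT (entry freed) or -ENOMEM. */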
struct page *read_swap_cache_async(swp_entry_t entry,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                spin_lock_irq(&swapper_space.tree_lock);
                found_page = radix_tree_lookup(&swapper_space.page_tree,
                                                entry.val);
                if (found_page)
                        page_cache_get(found_page);
                spin_unlock_irq(&swapper_space.tree_lock);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-ENOENT) if swap entry has been freed since
                 * our caller observed it.  May fail (-EEXIST) if there
                 * is already a page associated with this entry in the
                 * swap cache: added by a racing read_swap_cache_async,
                 * or by try_to_swap_out (or shmem_writepage) re-using
                 * the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                err = add_to_swap_cache(new_page, entry);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_active(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
        } while (err != -ENOENT && err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}