/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
void default_unplug_io_fn(struct backing_dev_info *bdi)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
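/*
 * Illustrative arithmetic, assuming the common defaults of
 * VM_MAX_READAHEAD == 128 (kbytes) and PAGE_CACHE_SIZE == 4096:
 *
 *	ra_pages = (128 * 1024) / 4096 = 32 pages, i.e. 128kB of readahead,
 *
 * unless the backing device overrides ra_pages with its own value.
 */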
/*
 * Initialise a struct file's readahead state.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	memset(ra, 0, sizeof(*ra));
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->average = ra->ra_pages / 2;
}
EXPORT_SYMBOL(file_ra_state_init);
/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
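/*
 * Illustrative arithmetic, assuming the common default of
 * VM_MIN_READAHEAD == 16 (kbytes) and a 4096-byte PAGE_CACHE_SIZE:
 * get_min_readahead() then evaluates to (16 * 1024) / 4096 = 4 pages,
 * which handle_ra_miss() uses as the lower clamp on next_size.
 */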
/**
 * read_cache_pages - populate an address space with some pages, and
 *			start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			while (!list_empty(pages)) {
				struct page *victim;

				victim = list_to_page(pages);
				list_del(&victim->lru);
				page_cache_release(victim);
			}
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
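/*
 * Usage sketch (hypothetical, for illustration only - "examplefs" and its
 * functions are not real):  a ->readpages() implementation which has built
 * a list of to-be-read pages can hand the whole list to read_cache_pages(),
 * using its single-page read routine as the filler callback.
 */
#if 0
static int examplefs_readpage(void *file, struct page *page);

static int examplefs_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	/*
	 * read_cache_pages() adds each page to the pagecache and the LRU,
	 * calls the filler to start I/O against it, and releases any pages
	 * it could not insert.
	 */
	return read_cache_pages(mapping, pages, examplefs_readpage, file);
}
#endif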
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret = 0;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
out:
	return ret;
}
/*
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, these form the "current window".
 * next_size:	The number of pages to read on the next readahead miss.
 *		Has the magical value -1UL if readahead has been disabled.
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		prev_page is mainly an optimisation: if page_cache_readahead
 *		sees that it is again being called for a page which it just
 *		looked at, it can return immediately without making any state
 *		changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the "maximally shrunk" state (next_size == -1UL),
 * readahead is disabled.  In this state, prev_page and size are used, inside
 * handle_ra_miss(), to detect the resumption of sequential I/O.  Once there
 * has been a decent run of sequential I/O (defined by get_min_readahead),
 * readahead is reenabled.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  When I/O has
 * completed, we submit a new batch of I/O, creating a new ahead window.
 *
 * ----|----------------|----------------|-----
 *     ^start           ^start+size
 *                      ^ahead_start     ^ahead_start+ahead_size
 *
 *                      ^ When this page is read, we submit I/O for the
 *                        ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * inside the current window.  Hits are good, and the window size (next_size)
 * is grown aggressively when hits occur.  Two pages are added to the next
 * window size on each hit, which will end up doubling the next window size by
 * the time I/O is submitted for it.
 *
 * If readahead hits are more sparse (say, the application is only reading
 * every second page) then the window will build more slowly.
 *
 * On a readahead miss (the application seeked away) the readahead window is
 * shrunk by 25%.  We don't want to drop it too aggressively, because it is a
 * good assumption that an application which has built a good readahead window
 * will continue to perform linear reads.  Either at the new file position, or
 * at the old one after another seek.
 *
 * After enough misses, readahead is fully disabled. (next_size = -1UL).
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to half of the
 * device maximum.
 *
 * A page request at (start + size) is not a miss at all - it's just a part of
 * sequential file reading.
 *
 * This function is to be called for every page which is read, rather than when
 * it is time to perform readahead.  This is so the readahead algorithm can
 * centrally work out the access patterns.  This could be costly with many tiny
 * read()s, so we specifically optimise for that case with prev_page.
 */
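/*
 * Illustrative walk-through, assuming a 32-page maximum window (an example,
 * not part of the description above):
 *
 *  - The application reads page 0 of a file.  That is the first-read special
 *    case, so a current window of max/2 = 16 pages is submitted immediately.
 *  - Pages 1, 2, 3, ... are readahead hits; each hit adds two pages to
 *    next_size, so the window submitted next keeps growing, capped at the
 *    32-page maximum.
 *  - While the application is still walking the current window, the ahead
 *    window of next_size pages is submitted, keeping I/O ahead of the reader.
 *  - If the application seeks away and keeps missing, next_size shrinks on
 *    each miss until it reaches -1UL and readahead is switched off.
 */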
/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages which actually had IO started against them.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	spin_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		unsigned long page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		spin_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		spin_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		unsigned long offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
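/*
 * Illustrative arithmetic, assuming a 4096-byte PAGE_CACHE_SIZE: each chunk
 * is (2 * 1024 * 1024) / 4096 = 512 pages, so a request for, say, 1280 pages
 * is issued as chunks of 512, 512 and 256 pages.
 */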
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read)
{
	if (!bdi_read_congested(mapping->backing_dev_info))
		return __do_page_cache_readahead(mapping, filp,
						offset, nr_to_read);
	return -1;
}
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.  Shrink the window.
 *
 * But don't shrink it too much - the application may read the same page
 * occasionally.
 */
static inline void
check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
			pgoff_t actual, pgoff_t orig_next_size)
{
	if (actual == 0) {
		if (orig_next_size > 1) {
			ra->next_size = orig_next_size - 1;
			if (ra->ahead_size)
				ra->ahead_size = ra->next_size;
		} else
			ra->next_size = -1UL;
		ra->size = 0;
	}
}
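/*
 * Worked example, using hypothetical numbers: if 16 pages were attempted but
 * actual == 0 (every page was already cached), the next window is trimmed to
 * orig_next_size - 1 = 15 pages; repeated all-cached attempts keep trimming
 * until next_size reaches 1 and is then set to -1UL, disabling readahead.
 */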
/*
 * page_cache_readahead is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 */
void
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
			struct file *filp, unsigned long offset)
{
	unsigned max;
	unsigned min;
	unsigned orig_next_size;
	unsigned actual;
	int first_access = 0;
	unsigned long preoffset = 0;

	/*
	 * Here we detect the case where the application is performing
	 * sub-page sized reads.  We avoid doing extra work and bogusly
	 * perturbing the readahead window expansion logic.
	 * If next_size is zero, this is the very first read for this
	 * file handle, or the window is maximally shrunk.
	 */
	if (offset == ra->prev_page) {
		if (ra->next_size != 0)
			goto out;
	}

	if (ra->next_size == -1UL)
		goto out;	/* Maximally shrunk */

	max = get_max_readahead(ra);
	if (max == 0)
		goto out;	/* No readahead */

	min = get_min_readahead(ra);
	orig_next_size = ra->next_size;
	if (ra->next_size == 0) {
		/*
		 * Special case - first read.
		 * We'll assume it's a whole-file read, and
		 * grow the window fast.
		 */
		first_access = 1;
		ra->next_size = max / 2;
		ra->prev_page = offset;
		ra->serial_cnt++;
		goto do_io;
	}

	if (offset == ra->prev_page + 1) {
		if (ra->serial_cnt <= (max * 2))
			ra->serial_cnt++;
	} else {
		ra->average = (ra->average + ra->serial_cnt) / 2;
		ra->serial_cnt = 1;
	}
	preoffset = ra->prev_page;
	ra->prev_page = offset;

	if (offset >= ra->start && offset <= (ra->start + ra->size)) {
		/*
		 * A readahead hit.  Either inside the window, or one
		 * page beyond the end.  Expand the next readahead size.
		 */
		ra->next_size += 2;
	} else {
		/*
		 * A miss - lseek, pagefault, pread, etc.  Shrink the readahead
		 * window.
		 */
		ra->next_size -= 2;
	}

	if ((long)ra->next_size > (long)max)
		ra->next_size = max;
	if ((long)ra->next_size <= 0L) {
		ra->next_size = -1UL;
		ra->size = 0;
		goto out;		/* Readahead is off */
	}
	/*
	 * Is this request outside the current window?
	 */
	if (offset < ra->start || offset >= (ra->start + ra->size)) {
		/*
		 * A miss against the current window.  Have we merely
		 * advanced into the ahead window?
		 */
		if (offset == ra->ahead_start) {
			/*
			 * Yes, we have.  The ahead window now becomes
			 * the current window.
			 */
			ra->start = ra->ahead_start;
			ra->size = ra->ahead_size;
			ra->prev_page = ra->start;
			ra->ahead_start = 0;
			ra->ahead_size = 0;

			/*
			 * Control now returns, probably to sleep until I/O
			 * completes against the first ahead page.
			 * When the second page in the old ahead window is
			 * requested, control will return here and more I/O
			 * will be submitted to build the new ahead window.
			 */
			goto out;
		}
do_io:
		/*
		 * This is the "unusual" path.  We come here during
		 * startup or after an lseek.  We invalidate the
		 * ahead window and get some I/O underway for the new
		 * current window.
		 */
		if (!first_access && preoffset >= ra->start &&
				preoffset < (ra->start + ra->size)) {
			/* Heuristic: If 'n' pages were
			 * accessed in the current window, there
			 * is a high probability that around 'n' pages
			 * shall be used in the next current window.
			 *
			 * To minimize lazy-readahead triggered
			 * in the next current window, read in
			 * an extra page.
			 */
			ra->next_size = preoffset - ra->start + 2;
		}
		ra->start = offset;
		ra->size = ra->next_size;
		ra->ahead_start = 0;		/* Invalidate these */
		ra->ahead_size = 0;
		actual = do_page_cache_readahead(mapping, filp, offset,
						 ra->size);
		if (!first_access) {
			/*
			 * do not adjust the readahead window size the first
			 * time, the ahead window might get closed if all
			 * the pages are already in the cache.
			 */
			check_ra_success(ra, ra->size, actual, orig_next_size);
		}
	} else {
		/*
		 * This read request is within the current window.  It may be
		 * time to submit I/O for the ahead window while the
		 * application is about to step into the ahead window.
		 */
		if (ra->ahead_start == 0) {
			/*
			 * If the average io-size is more than the maximum
			 * readahead size of the file, the io pattern is
			 * sequential.  Hence bring in the readahead window
			 * immediately.
			 * Else the io pattern is random.  Bring in the
			 * readahead window only if the last page of the
			 * current window is accessed (lazy readahead).
			 */
			unsigned long average = ra->average;

			if (ra->serial_cnt > average)
				average = (ra->serial_cnt + ra->average) / 2;

			if ((average >= max) || (offset == (ra->start +
							ra->size - 1))) {
				ra->ahead_start = ra->start + ra->size;
				ra->ahead_size = ra->next_size;
				actual = do_page_cache_readahead(mapping, filp,
					ra->ahead_start, ra->ahead_size);
				check_ra_success(ra, ra->ahead_size,
						actual, orig_next_size);
			}
		}
	}
out:
	return;
}
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing) or if the readahead window is maximally shrunk.
 *
 * If the window has been maximally shrunk (next_size == -1UL) then look to see
 * if we are getting misses against sequential file offsets.  If so, and this
 * persists then resume readahead.
 *
 * Otherwise we're thrashing, so shrink the readahead window by three pages.
 * This is because it is grown by two pages on a readahead hit.  Theory being
 * that the readahead window size will stabilise around the maximum level at
 * which there is no thrashing.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	if (ra->next_size == -1UL) {
		const unsigned long max = get_max_readahead(ra);

		if (offset != ra->prev_page + 1) {
			ra->size = ra->size ? ra->size - 1 : 0; /* Not sequential */
		} else {
			ra->size++;			/* A sequential read */
			if (ra->size >= max) {		/* Resume readahead */
				ra->start = offset - max;
				ra->next_size = max;
				ra->size = max;
				ra->ahead_start = 0;
				ra->ahead_size = 0;
				ra->average = max / 2;
			}
		}
		ra->prev_page = offset;
	} else {
		const unsigned long min = get_min_readahead(ra);

		ra->next_size -= 3;
		if (ra->next_size < min)
			ra->next_size = min;
	}
}
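/*
 * Caller-side sketch (illustrative only; the function below is hypothetical,
 * loosely modelled on the pagecache read path):  page_cache_readahead() is
 * called for every page the reader touches, and handle_ra_miss() is called
 * when a page we expected readahead to have brought in is not found.
 */
#if 0
static struct page *example_read_one_page(struct address_space *mapping,
			struct file_ra_state *ra, struct file *filp,
			unsigned long index)
{
	struct page *page;

	/* Let the readahead state machine see every page access. */
	page_cache_readahead(mapping, ra, filp, index);

	page = find_get_page(mapping, index);
	if (page == NULL) {
		/*
		 * The page we expected readahead to bring in is missing:
		 * either the window is maximally shrunk or the page was
		 * reclaimed (readahead thrashing).
		 */
		handle_ra_miss(mapping, ra, index);
	}
	return page;	/* NULL tells the caller to do a synchronous read */
}
#endif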
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	get_zone_counts(&active, &inactive, &free);
	return min(nr, (inactive + free) / 2);
}
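/*
 * Worked example with hypothetical numbers: for a request of nr = 4096 pages
 * on a machine with 2000 inactive and 1000 free pages, the limit is
 * min(4096, (2000 + 1000) / 2) = 1500 pages, so the caller's readahead is
 * capped well below the amount of easily reclaimable memory.
 */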